Exemplo n.º 1
0
/*
 * Create font object
 * @overload new(face, font_option = nil)
 * @param face [Symbol] Font name identifier. Only a subset of Hershey fonts (http://sources.isc.org/utils/misc/hershey-font.txt) are supported now:
 *   - :simplex - normal size sans-serif font
 *   - :plain - small size sans-serif font
 *   - :duplex - normal size sans-serif font (more complex than :simplex)
 *   - :complex - normal size serif font
 *   - :triplex - normal size serif font (more complex than :complex)
 *   - :complex_small - smaller version of :complex
 *   - :script_simplex - hand-writing style font
 *   - :script_complex - more complex variant of :script_simplex
 *
 * @param font_option [Hash] a Hash that may include the following keys.
 * @option font_option [Number] :hscale Horizontal scale. If equal to 1.0, the characters have the original width depending on the font type. If equal to 0.5, the characters are of half the original width.
 * @option font_option [Number] :vscale Vertical scale. If equal to 1.0, the characters have the original height depending on the font type. If equal to 0.5, the characters are of half the original height.
 * @option font_option [Number] :shear Approximate tangent of the character slope relative to the vertical line. Zero value means a non-italic font, 1.0f means ~45 degree slope, etc.
 * @option font_option [Number] :thickness Thickness of the text strokes.
 * @option font_option [Number] :line_type Type of the strokes, see CvMat#Line description.
 * @option font_option [Number] :italic If value is not nil or false that means italic or oblique font.
 *
 * @example Create Font
 *   OpenCV::CvFont.new(:simplex, :hscale => 2, :vscale => 2, :italic => true)
 *   # create 2x bigger than normal, italic type font.
 *
 * @opencv_func cvInitFont
 */
VALUE
rb_initialize(int argc, VALUE *argv, VALUE self)
{
  VALUE face, font_option;
  rb_scan_args(argc, argv, "11", &face, &font_option);
  Check_Type(face, T_SYMBOL);

  // Map the face symbol to its numeric font-face constant via the FACE table.
  VALUE face_code = rb_hash_lookup(rb_const_get(cCvFont::rb_class(), rb_intern("FACE")), face);
  if (NIL_P(face_code)) {
    rb_raise(rb_eArgError, "undefined face.");
  }
  font_option = FONT_OPTION(font_option);

  int font_face = NUM2INT(face_code);
  // The italic option is encoded as an extra bit on the face constant.
  if (FO_ITALIC(font_option)) {
    font_face |= CV_FONT_ITALIC;
  }

  try {
    cvInitFont(CVFONT(self), font_face,
               FO_HSCALE(font_option), FO_VSCALE(font_option),
               FO_SHEAR(font_option), FO_THICKNESS(font_option),
               FO_LINE_TYPE(font_option));
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }

  return self;
}
Exemplo n.º 2
0
/*
 * call-seq:
 *   IplConvKernel.new(cols, rows, anchor_x, anchor_y, shape [,values = nil])
 *
 * Creates structuring element.
 *  cols
 *    Number of columns in the structuring element. 
 *  rows
 *    Number of rows in the structuring element. 
 *  anchor_x
 *    Relative horizontal offset of the anchor point. 
 *  anchor_y
 *    Relative vertical offset of the anchor point.
 *  shape
 *    Shape of the structuring element; may have the following values:
 *     :rect
 *     :cross
 *     :ellipse
 *     :custom
 *
 */
VALUE
rb_initialize(int argc, VALUE *argv, VALUE self)
{
  VALUE shape, rows, cols, anchor_x, anchor_y, values;
  rb_scan_args(argc, argv, "51", &cols, &rows, &anchor_x, &anchor_y, &shape, &values);
  int shape_type;
  int _cols = NUM2INT(cols);
  int _rows = NUM2INT(rows);
  long num_values;  // FIX: RARRAY_LEN returns long, not int
  int *_values = NULL;
  const int INVALID_SHAPE = -1;

  shape_type = CVMETHOD("STRUCTURING_ELEMENT_SHAPE", shape, INVALID_SHAPE);
  if (shape_type == INVALID_SHAPE)
    // FIX: shape is the 5th positional argument (see call-seq), not the 1st.
    rb_raise(rb_eTypeError, "argument 5 (shape) should be :rect or :cross or :ellipse or :custom.");
  if (shape_type == CV_SHAPE_CUSTOM) {
    if (NIL_P(values))
      rb_raise(rb_eArgError, "argument 6 (values) should not be nil when the shape is :custom.");
    // FIX: guard against non-Array input before using RARRAY_* accessors,
    // which would otherwise read garbage or crash.
    Check_Type(values, T_ARRAY);
    num_values = RARRAY_LEN(values);
    _values = ALLOCA_N(int, num_values);
    VALUE *values_ptr = RARRAY_PTR(values);
    for (long i = 0; i < num_values; ++i)
      _values[i] = NUM2INT(values_ptr[i]);
  }
  try {
    DATA_PTR(self) = rb_cvCreateStructuringElementEx(_cols, _rows, NUM2INT(anchor_x), NUM2INT(anchor_y),
						     shape_type, _values);
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return self;
}
Exemplo n.º 3
0
/*
 * Determines whether the point is inside a contour, outside, or lies on an edge (or coincides with a vertex).
 * @overload point_polygon_test(point, measure_dist)
 *   @param point [CvPoint2D32f] Point tested against the contour
 *   @param measure_dist [Boolean] If true, the method estimates the signed distance from the point to
 *      the nearest contour edge. Otherwise, the function only checks if the point is inside a contour or not.
 * @return [Number] When measure_dist = false, the return value is +1, -1 and 0, respectively.
 *   When measure_dist = true, it is a signed distance between the point and the nearest contour edge.
 * @opencv_func cvPointPolygonTest
 */
VALUE
rb_point_polygon_test(VALUE self, VALUE point, VALUE measure_dist)
{
  int measure_dist_flag;

  // Accept true/false as well as a raw integer flag value.
  if (measure_dist == Qtrue)
    measure_dist_flag = 1;
  else if (measure_dist == Qfalse)
    measure_dist_flag = 0;
  else
    measure_dist_flag = NUM2INT(measure_dist);

  // FIX: was `double dist = Qnil;` — Qnil is a pointer-sized VALUE constant,
  // not a floating-point value; initialize with a plain 0.0 instead.
  double dist = 0.0;
  try {
    dist = cvPointPolygonTest(CVARR(self), VALUE_TO_CVPOINT2D32F(point), measure_dist_flag);
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }

  /* cvPointPolygonTest returns 100, -100 or 0 when measure_dist = 0;
   * normalize those to +1 / -1 / 0 as documented. */
  if ((!measure_dist_flag) && ((int)dist) != 0)
    dist = (dist > 0) ? 1 : -1;

  return rb_float_new(dist);
}
Exemplo n.º 4
0
/*
 * call-seq:
 *   CvCapture.open(<i>[dev = -1]</i>)
 *
 * Reading video stream from the specified file or camera device.
 * If <i>dev</i> is string (i.e "stream.avi"), reading video stream from file.
 * If <i>dev</i> is number or symbol(include CvCapture::INTERFACE),
 * reading video stream from camera. 
 * Currently two camera interfaces can be used on Windows:
 * * Video for Windows(VFW)
 * * Matrox Imaging Library(MIL)
 * and two on Linux
 * * V4L
 * * FireWire(IEEE1394).
 * If there is only one camera or it does not matter what camera to use <i>nil</i> may be passed.
 */
VALUE
rb_open(int argc, VALUE *argv, VALUE self)
{
  VALUE device;
  rb_scan_args(argc, argv, "01", &device);

  CvCapture *capture = 0;
  try {
    if (TYPE(device) == T_STRING) {
      // A filename: read the video stream from a file.
      capture = cvCaptureFromFile(StringValueCStr(device));
    }
    else if (TYPE(device) == T_FIXNUM) {
      // A plain camera index.
      capture = cvCaptureFromCAM(FIX2INT(device));
    }
    else if (TYPE(device) == T_SYMBOL) {
      // A named camera interface, resolved through the INTERFACE table.
      VALUE cap_index = rb_hash_lookup(rb_const_get(rb_class(), rb_intern("INTERFACE")), device);
      if (NIL_P(cap_index))
        rb_raise(rb_eArgError, "undefined interface.");
      capture = cvCaptureFromCAM(NUM2INT(cap_index));
    }
    else if (TYPE(device) == T_NIL) {
      // No preference: let OpenCV pick any available camera.
      capture = cvCaptureFromCAM(CV_CAP_ANY);
    }
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  // Unsupported device types (or a failed open) leave capture as NULL.
  if (!capture)
    rb_raise(rb_eStandardError, "Invalid capture format.");
  return Data_Wrap_Struct(rb_klass, 0, cvcapture_free, capture);
}
Exemplo n.º 5
0
/*
 * Constructor
 *
 * @overload new(seq_flags = CV_SEQ_ELTYPE_POINT | CV_SEQ_KIND_GENERIC, storage = nil)
 *   @param [Fixnum] seq_flags Flags of the created sequence, which are combinations of
 *     the element types and sequence types.
 *     - Element type:
 *       - <tt>CV_SEQ_ELTYPE_POINT</tt>: {CvPoint}
 *       - <tt>CV_32FC2</tt>: {CvPoint2D32f}
 *       - <tt>CV_SEQ_ELTYPE_POINT3D</tt>: {CvPoint3D32f}
 *       - <tt>CV_SEQ_ELTYPE_INDEX</tt>: Fixnum
 *       - <tt>CV_SEQ_ELTYPE_CODE</tt>: Fixnum (Freeman code)
 *     - Sequence type:
 *       - <tt>CV_SEQ_KIND_GENERIC</tt>: Generic sequence
 *       - <tt>CV_SEQ_KIND_CURVE</tt>: Curve
 *   @param [CvMemStorage] storage Sequence location
 * @return [CvContour] self
 * @opencv_func cvCreateSeq
 * @example
 *   seq = CvContour.new(CV_SEQ_ELTYPE_POINT | CV_SEQ_KIND_CURVE)
 *   seq << CvPoint.new(1, 2)
 *   seq << 3 #=> TypeError
 */
VALUE
rb_initialize(int argc, VALUE *argv, VALUE self)
{
  VALUE seq_flags_value, storage_value;
  rb_scan_args(argc, argv, "02", &seq_flags_value, &storage_value);

  // Default: a generic sequence of CvPoint elements.
  int seq_flags;
  if (!NIL_P(seq_flags_value)) {
    Check_Type(seq_flags_value, T_FIXNUM);
    seq_flags = FIX2INT(seq_flags_value);
  }
  else {
    seq_flags = CV_SEQ_ELTYPE_POINT | CV_SEQ_KIND_GENERIC;
  }
  storage_value = CHECK_CVMEMSTORAGE(storage_value);

  try {
    // Delegate sequence creation (and element-class registration) to CvSeq.
    DATA_PTR(self) = (CvContour*)cCvSeq::create_seq(seq_flags, sizeof(CvContour), storage_value);
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }

  return self;
}
Exemplo n.º 6
0
/*
 * Creates a CvSeq in the given (or a freshly checked) memory storage and
 * registers its Ruby element class plus the storage as GC root.
 * Element types with no known Ruby class fall back to a generic CvPoint
 * sequence.
 */
CvSeq*
create_seq(int seq_flags, size_t header_size, VALUE storage_value)
{
  VALUE klass = Qnil;
  storage_value = CHECK_CVMEMSTORAGE(storage_value);

  if (!eltype2class(seq_flags & CV_SEQ_ELTYPE_MASK, &klass)) {
    // Unknown element type: fall back to a generic point sequence.
    seq_flags = CV_SEQ_ELTYPE_POINT | CV_SEQ_KIND_GENERIC;
  }

  // Element size is derived from the CvMat type bits of the flags.
  size_t elem_size = (size_t)(CV_ELEM_SIZE(CV_MAT_TYPE(seq_flags)));
  CvSeq* seq = NULL;
  try {
    seq = cvCreateSeq(seq_flags, header_size, elem_size, CVMEMSTORAGE(storage_value));
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  register_elem_class(seq, klass);
  register_root_object(seq, storage_value);

  return seq;
}
Exemplo n.º 7
0
/*
 * call-seq:
 *   sub(val[,mask])
 *
 * Return new CvScalar if <i>val</i> is CvScalar or compatible object.
 *   self[I] - val[I]
 * Or return new CvMat if <i>val</i> is CvMat or subclass.
 */
VALUE
rb_sub(int argc, VALUE *argv, VALUE self)
{
  VALUE val, mask;
  rb_scan_args(argc, argv, "11", &val, &mask);

  if (!rb_obj_is_kind_of(val, cCvMat::rb_class())) {
    // Scalar - scalar: element-wise difference over all four channels.
    CvScalar *lhs = CVSCALAR(self);
    CvScalar rhs = VALUE_TO_CVSCALAR(val);
    return new_object(cvScalar(lhs->val[0] - rhs.val[0],
                               lhs->val[1] - rhs.val[1],
                               lhs->val[2] - rhs.val[2],
                               lhs->val[3] - rhs.val[3]));
  }

  // Matrix operand: compute (self - val) element-wise via cvSubRS into a
  // destination matrix of the same size and type as val.
  CvArr *mat_ptr = CVARR(val);
  VALUE dest = Qnil;
  try {
    dest = cCvMat::new_object(cvGetSize(mat_ptr), cvGetElemType(mat_ptr));
    cvSubRS(mat_ptr, *CVSCALAR(self), CVARR(dest), MASK(mask));
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return dest;
}
Exemplo n.º 8
0
/*
 * call-seq:
 *   CvHuMoments.new(<i>src_moments</i>)
 *
 * Calculates the seven Hu invariants.
 * <i>src_moments</i> The input moments
 *
 * seven Hu invariants that are defined as:
 *   h1=η20+η02
 *   h2=(η20-η02)²+4η11²
 *   h3=(η30-3η12)²+ (3η21-η03)²
 *   h4=(η30+η12)²+ (η21+η03)²
 *   h5=(η30-3η12)(η30+η12)[(η30+η12)²-3(η21+η03)²]+(3η21-η03)(η21+η03)[3(η30+η12)²-(η21+η03)²]
 *   h6=(η20-η02)[(η30+η12)²- (η21+η03)²]+4η11(η30+η12)(η21+η03)
 *   h7=(3η21-η03)(η21+η03)[3(η30+η12)²-(η21+η03)²]-(η30-3η12)(η21+η03)[3(η30+η12)²-(η21+η03)²]
 * where ηi,j are normalized central moments of 2-nd and 3-rd orders. The computed values are proved to be invariant to the image scaling, rotation, and reflection except the seventh one, whose sign is changed by reflection. 
 */
VALUE
rb_initialize(VALUE self, VALUE src_moments)
{
  // Compute the seven Hu invariants from src_moments (CvMoments) directly
  // into the CvHuMoments struct wrapped by self.
  try {
    cvGetHuMoments(CVMOMENTS(src_moments), CVHUMOMENTS(self));
  }
  catch (cv::Exception& e) {
    // Convert the OpenCV C++ exception into a Ruby CvError.
    raise_cverror(e);
  }
  return self;      
}
Exemplo n.º 9
0
/*
 * Allocates a new CvHuMoments Ruby object and fills it with the seven Hu
 * invariants computed from src_moments.
 */
VALUE
new_object(CvMoments *src_moments)
{
  VALUE object = rb_allocate(rb_klass);
  try {
    cvGetHuMoments(src_moments, CVHUMOMENTS(object));
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return object;
}
Exemplo n.º 10
0
/*
 * Calculates distance between a point and the nearest contour edge
 * @overload measure_distance(point)
 *   @param point [CvPoint2D32f] Point tested against the contour
 * @return Signed distance between the point and the nearest contour edge
 * @opencv_func cvPointPolygonTest
 */
VALUE
rb_measure_distance(VALUE self, VALUE point)
{
  // measure_dist = 1 requests the signed distance to the nearest edge.
  double signed_distance = 0;
  try {
    signed_distance = cvPointPolygonTest(CVARR(self), VALUE_TO_CVPOINT2D32F(point), 1);
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return rb_float_new(signed_distance);
}
Exemplo n.º 11
0
/*
 * Performs a point-in-contour test.
 * The method determines whether the point is inside a contour, outside,
 * or lies on an edge (or coincides with a vertex).
 * @overload in?(point)
 *   @param point [CvPoint2D32f] Point tested against the contour
 * @return [Boolean] If the point is inside, returns true. If outside, returns false.
 *   If lies on an edge, returns nil.
 * @opencv_func cvPointPolygonTest
 */
VALUE
rb_in_q(VALUE self, VALUE point)
{
  // measure_dist = 0: only classify the point, no distance computation.
  double verdict = 0;
  try {
    verdict = cvPointPolygonTest(CVARR(self), VALUE_TO_CVPOINT2D32F(point), 0);
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  // Positive: inside -> true; negative: outside -> false; zero: on edge -> nil.
  if (verdict > 0)
    return Qtrue;
  if (verdict < 0)
    return Qfalse;
  return Qnil;
}
Exemplo n.º 12
0
/*
 * Calculates up-right bounding rectangle of point set.
 * @overload bounding_rect
 * @return [CvRect] Bounding rectangle
 * @opencv_func cvBoundingRect
 */
VALUE
rb_bounding_rect(VALUE self)
{
  CvRect rect;
  try {
    // Second argument (update) = 1: recompute the rectangle; for contours
    // this also refreshes the header's cached bounding box (per cvBoundingRect
    // documentation).
    rect = cvBoundingRect(CVCONTOUR(self), 1);
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return cCvRect::new_object(rect);
}
Exemplo n.º 13
0
/*
 * Sets capture property `id` to `value`; returns the driver's result
 * as a Float.
 */
VALUE
rb_set_capture_property(VALUE self, int id, VALUE value)
{
  double retval = 0;
  try {
    retval = cvSetCaptureProperty(CVCAPTURE(self), id, NUM2DBL(value));
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return rb_float_new(retval);
}
Exemplo n.º 14
0
/*
 * Get boolean flags indicating whether images should be converted to RGB
 */
/*
 * Get boolean flag indicating whether images should be converted to RGB
 */
VALUE
rb_get_convert_rgb(VALUE self)
{
  // The property comes back as a double; truncate to int before testing,
  // matching the driver's 0/1 convention.
  int flag_value = 0;
  try {
    flag_value = (int)cvGetCaptureProperty(CVCAPTURE(self), CV_CAP_PROP_CONVERT_RGB);
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return flag_value != 0 ? Qtrue : Qfalse;
}
Exemplo n.º 15
0
/*
 * call-seq:
 *   grab -> true or false
 *
 * The grabbed frame is stored internally. Grabbing is fast, which is
 * important for synchronization when reading from several cameras
 * simultaneously. The grabbed frames are not exposed because
 * they may be stored in compressed format (as defined by camera/driver).
 * To retrieve the grabbed frame, retrieve should be used.
 *
 * If grabbed frame was success, return true. Otherwise return false.
 */
VALUE
rb_grab(VALUE self)
{
  // cvGrabFrame returns nonzero on success.
  int grabbed = 0;
  try {
    grabbed = cvGrabFrame(CVCAPTURE(self));
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return grabbed != 0 ? Qtrue : Qfalse;
}
Exemplo n.º 16
0
/*
 * Reads capture property `id` and returns it as a Float.
 */
VALUE
rb_get_capture_property(VALUE self, int id)
{
  double property_value = 0;
  try {
    property_value = cvGetCaptureProperty(CVCAPTURE(self), id);
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return rb_float_new(property_value);
}
Exemplo n.º 17
0
/*
 * Release IplConvKernel object from memory and delete from hashtable.
 */
void
release_iplconvkernel_object(void *ptr)
{
  if (ptr) {
    // Remove the pointer from the tracking hashtable first so it never
    // holds a dangling entry.
    unregister_object(ptr);
    try {
      // cvReleaseStructuringElement frees the kernel and NULLs the local
      // copy of ptr (the cast reinterprets &ptr as IplConvKernel**).
      cvReleaseStructuringElement((IplConvKernel**)(&ptr));
    }
    catch (cv::Exception& e) {
      raise_cverror(e);
    }
  }
}
Exemplo n.º 18
0
/*
 * Delete mark symbol from hash table, then free memory.
 */
void
free_object(void *ptr)
{
  if (ptr) {
    // Remove the pointer from the tracking hashtable before freeing it.
    unregister_object(ptr);
    try {
      // cvFree NULLs the local copy of ptr after releasing the memory.
      cvFree(&ptr);
    }
    catch (cv::Exception& e) {
      raise_cverror(e);
    }
  }
}
Exemplo n.º 19
0
/*
 * Allocates a fresh memory storage and creates an empty Freeman chain
 * (CvChain) sequence inside it, wrapped as a CvChain Ruby object.
 */
VALUE
new_object()
{
  VALUE storage_value = cCvMemStorage::new_object();
  CvSeq *chain_seq = NULL;
  try {
    // Chain elements are single bytes (Freeman codes).
    chain_seq = cvCreateSeq(CV_SEQ_CHAIN_CONTOUR, sizeof(CvChain), sizeof(char),
                            CVMEMSTORAGE(storage_value));
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return cCvSeq::new_sequence(cCvChain::rb_class(), chain_seq, T_FIXNUM, storage_value);
}
Exemplo n.º 20
0
/*
 * Get size of frames in the video stream.
 */
/*
 * Get size of frames in the video stream.
 */
VALUE
rb_get_size(VALUE self)
{
  CvSize frame_size;
  try {
    CvCapture* capture = CVCAPTURE(self);
    int width = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
    int height = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
    frame_size = cvSize(width, height);
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return cCvSize::new_object(frame_size);
}
/*
 * call-seq:
 *   CvHaarClassifierCascade.load(<i>path</i>) -> object-detector
 * 
 * Load a trained cascade of Haar classifiers from a file.
 * Object detection classifiers are stored in XML or YAML files.
 * sample of object detection classifier files is included by OpenCV.
 *
 * You can find these at
 *    C:\Program Files\OpenCV\data\haarcascades\*.xml (Windows, default install path)
 *
 * e.g. you want to try to detect human's face.
 *    detector = CvHaarClassifierCascade.load("haarcascade_frontalface_alt.xml")
 */
VALUE
rb_load(VALUE klass, VALUE path)
{
  CvHaarClassifierCascade *cascade = NULL;
  try {
    cascade = (CvHaarClassifierCascade*)cvLoad(StringValueCStr(path), 0, 0, 0);
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  if (!CV_IS_HAAR_CLASSIFIER(cascade)) {
    // FIX: if cvLoad returned some non-cascade object, it was leaked here
    // because rb_raise longjmps before anything wraps or frees it. Release
    // it with the generic cvRelease before raising.
    if (cascade)
      cvRelease((void**)&cascade);
    rb_raise(rb_eArgError, "invalid format haar classifier cascade file.");
  }
  return Data_Wrap_Struct(klass, 0, cvhaarclassifiercascade_free, cascade);
}
Exemplo n.º 22
0
/*
 * call-seq:
 *   CvSeq.new(type[,storage])
 *
 * Return a new CvSeq. <i>type</i> should be following classes.
 *
 * * CvIndex
 * * CvPoint
 */
VALUE
rb_initialize(int argc, VALUE *argv, VALUE self)
{
  VALUE klass, storage_value;

  rb_scan_args(argc, argv, "11", &klass, &storage_value);
  if (!rb_obj_is_kind_of(klass, rb_cClass))
    raise_typeerror(klass, rb_cClass);

  // Map the requested element class to an OpenCV sequence element type and
  // a per-element byte size.
  int type = 0, size = 0;
  if (klass == rb_cFixnum) {
    type = CV_SEQ_ELTYPE_INDEX;
    size = sizeof(int);
  }
  else if (klass == cCvPoint::rb_class()) {
    type = CV_SEQ_ELTYPE_POINT;
    size = sizeof(CvPoint);
  }
  else if (klass == cCvPoint2D32f::rb_class()) {
    // NOTE(review): this reuses CV_SEQ_ELTYPE_POINT (an integer-point type)
    // with a CvPoint2D32f element size — presumably the explicit size
    // argument to cvCreateSeq makes this work, but confirm CV_32FC2 was not
    // intended here.
    type = CV_SEQ_ELTYPE_POINT;
    size = sizeof(CvPoint2D32f);
  }
  else if (klass == cCvPoint3D32f::rb_class()) {
    type = CV_SEQ_ELTYPE_POINT3D;
    size = sizeof(CvPoint3D32f);
  }
  else
    rb_raise(rb_eArgError, "unsupport %s class for sequence-block.", rb_class2name(klass));
  
  CvSeq* seq = NULL;
  // Use the caller-provided storage when given, otherwise allocate a fresh one.
  if (NIL_P(storage_value)) {
    storage_value = cCvMemStorage::new_object(0);
  }
  else {
    storage_value = CHECK_CVMEMSTORAGE(storage_value);
  }
  
  try {
    seq = cvCreateSeq(type, sizeof(CvSeq), size, CVMEMSTORAGE(storage_value));
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  DATA_PTR(self) = seq;
  // Remember the element class and keep the storage alive as a GC root.
  register_elem_class(seq, klass);
  register_root_object(seq, storage_value);
  
  return self;
}
Exemplo n.º 23
0
/*
 * Creates hierarchical representation of contour
 * @overload create_tree(threshold = 0.0)
 *   @param threshold [Number] If <= 0, the method creates full binary tree representation.
 *     If > 0, the method creates representation with the precision threshold.
 * @return [CvContourTree] Hierarchical representation of the contour
 * @opencv_func cvCreateContourTree
 */
VALUE
rb_create_tree(int argc, VALUE *argv, VALUE self)
{
  VALUE threshold;
  rb_scan_args(argc, argv, "01", &threshold);

  // The tree lives in its own freshly allocated storage.
  VALUE storage = cCvMemStorage::new_object();
  CvContourTree *tree = NULL;
  try {
    tree = cvCreateContourTree(CVSEQ(self), CVMEMSTORAGE(storage), IF_DBL(threshold, 0.0));
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return cCvSeq::new_sequence(cCvContourTree::rb_class(), (CvSeq*)tree, cCvPoint::rb_class(), storage);
}
Exemplo n.º 24
0
/*
 * Set size of frames in the video stream.
 */
/*
 * Set size of frames in the video stream.
 */
VALUE
rb_set_size(VALUE self, VALUE value)
{
  CvSize new_size = VALUE_TO_CVSIZE(value);
  double result = 0;
  try {
    CvCapture* capture = CVCAPTURE(self);
    // Width first, then height; only the height call's result is returned.
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, new_size.width);
    result = cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, new_size.height);
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return DBL2NUM(result);
}
Exemplo n.º 25
0
/*
 * call-seq:
 *   points -> array(include cvpoint2d32f)
 * Find box vertices. Return Array contain 4 CvPoint2D32f.
 */
VALUE
rb_points(VALUE self)
{
  // A rotated box always has exactly four vertices.
  enum { NUM_VERTICES = 4 };
  CvPoint2D32f vertices[NUM_VERTICES];
  try {
    cvBoxPoints(*CVBOX2D(self), vertices);
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  VALUE result = rb_ary_new2(NUM_VERTICES);
  for (int i = 0; i < NUM_VERTICES; ++i)
    rb_ary_store(result, i, cCvPoint2D32f::new_object(vertices[i]));
  return result;
}
Exemplo n.º 26
0
/*
 * Creates a CvSeq in the given (or freshly checked) memory storage,
 * choosing the Ruby element class from the element-type bits of seq_flags.
 * Unknown element types fall back to a generic CvPoint sequence; pointer
 * element types are rejected.
 */
CvSeq*
create_seq(int seq_flags, size_t header_size, VALUE storage_value)
{
  VALUE klass = Qnil;
  int eltype = seq_flags & CV_SEQ_ELTYPE_MASK;
  storage_value = CHECK_CVMEMSTORAGE(storage_value);

  switch (eltype) {
  case CV_SEQ_ELTYPE_POINT:
    klass = cCvPoint::rb_class();
    break;
  case CV_32FC2:
    klass = cCvPoint2D32f::rb_class();
    break;
  case CV_SEQ_ELTYPE_POINT3D:
    klass = cCvPoint3D32f::rb_class();
    break;
  case CV_SEQ_ELTYPE_CODE:
  case CV_SEQ_ELTYPE_INDEX:
    klass = rb_cFixnum;
    break;
  case CV_SEQ_ELTYPE_PPOINT: // or CV_SEQ_ELTYPE_PTR:
    // Not supported
    rb_raise(rb_eArgError, "seq_flags %d is not supported.", eltype);
    break;
  default:
    // Unknown element type: coerce to a generic sequence of CvPoint.
    seq_flags = CV_SEQ_ELTYPE_POINT | CV_SEQ_KIND_GENERIC;
    klass = cCvPoint::rb_class();
    break;
  }

  // Element size is derived from the CvMat type bits of the (possibly
  // coerced) flags.
  int mat_type = CV_MAT_TYPE(seq_flags);
  size_t elem_size = (size_t)(CV_ELEM_SIZE(mat_type));
  CvSeq* seq = NULL;
  try {
    seq = cvCreateSeq(seq_flags, header_size, elem_size, CVMEMSTORAGE(storage_value));
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  // Remember the element class and keep the storage alive as a GC root.
  register_elem_class(seq, klass);
  register_root_object(seq, storage_value);
  
  return seq;
}
Exemplo n.º 27
0
/*
 * call-seq:
 *   match_shapes(object, method) -> float
 *
 * Compares two shapes(self and object). <i>object</i> should be CvContour.
 *
 * A - object1, B - object2:
 * * method=CV_CONTOURS_MATCH_I1
 *     I1(A,B)=sumi=1..7abs(1/mAi - 1/mBi)
 * * method=CV_CONTOURS_MATCH_I2
 *     I2(A,B)=sumi=1..7abs(mAi - mBi)
 * * method=CV_CONTOURS_MATCH_I3
 *     I3(A,B)=sumi=1..7abs(mAi - mBi)/abs(mAi)
 */
VALUE
rb_match_shapes(int argc, VALUE *argv, VALUE self)
{
  VALUE object, method, param;
  rb_scan_args(argc, argv, "21", &object, &method, &param);
  int method_flag = CVMETHOD("COMPARISON_METHOD", method);
  if (!rb_obj_is_kind_of(object, cCvContour::rb_class()))
    rb_raise(rb_eTypeError, "argument 1 (shape) should be %s",
        rb_class2name(cCvContour::rb_class()));
  double result = 0;
  try {
    // FIX: the optional third argument was scanned but never forwarded.
    // cvMatchShapes takes a method-specific `parameter` as its 4th argument
    // (default 0); pass it through when given.
    result = cvMatchShapes(CVARR(self), CVARR(object), method_flag,
                           NIL_P(param) ? 0.0 : NUM2DBL(param));
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return rb_float_new(result);
}
/*
 * call-seq:
 *   detect_objects(image[, options]) -> cvseq(include CvAvgComp object)
 *   detect_objects(image[, options]){|cmp| ... } -> cvseq(include CvAvgComp object)
 *
 * Detects objects in the image. This method finds rectangular regions in the
 * given image that are likely to contain objects the cascade has been trained
 * for and return those regions as a sequence of rectangles.
 *
 * * <i>option</i> should be Hash include these keys.
 *   :scale_factor (should be > 1.0)
 *      The factor by which the search window is scaled between the subsequent scans,
 *      1.1 mean increasing window by 10%.
 *   :storage
 *      Memory storage to store the resultant sequence of the object candidate rectangles
 *   :flags
 *      Mode of operation. Currently the only flag that may be specified is CV_HAAR_DO_CANNY_PRUNING .
 *      If it is set, the function uses Canny edge detector to reject some image regions that contain
 *      too few or too much edges and thus can not contain the searched object. The particular threshold
 *      values are tuned for face detection and in this case the pruning speeds up the processing
 *   :min_neighbors
 *      Minimum number (minus 1) of neighbor rectangles that makes up an object.
 *      All the groups of a smaller number of rectangles than min_neighbors - 1 are rejected.
 *      If min_neighbors is 0, the function does not do any grouping at all and returns all the detected
 *      candidate rectangles, which may be useful if the user wants to apply a customized grouping procedure.
 *   :min_size
 *      Minimum window size. By default, it is set to size of samples the classifier has been
 *      trained on (~20x20 for face detection).
 *   :max_size
 *      Maximum window size to use. By default, it is set to the size of the image.
 */
VALUE
rb_detect_objects(int argc, VALUE *argv, VALUE self)
{ 
  VALUE image, options;
  rb_scan_args(argc, argv, "11", &image, &options);

  double scale_factor;
  int flags, min_neighbors;
  CvSize min_size, max_size;
  VALUE storage_val;
  if (NIL_P(options)) {
    // No options hash: use cvHaarDetectObjects' conventional defaults and a
    // fresh memory storage.
    scale_factor = 1.1;
    flags = 0;
    min_neighbors = 3;
    min_size = max_size = cvSize(0, 0);
    storage_val = cCvMemStorage::new_object();
  }
  else {
    // Pull each option from the hash, falling back to the same defaults as
    // the nil-options branch.
    scale_factor = IF_DBL(LOOKUP_CVMETHOD(options, "scale_factor"), 1.1);
    flags = IF_INT(LOOKUP_CVMETHOD(options, "flags"), 0);
    min_neighbors = IF_INT(LOOKUP_CVMETHOD(options, "min_neighbors"), 3);
    VALUE min_size_val = LOOKUP_CVMETHOD(options, "min_size");
    min_size = NIL_P(min_size_val) ? cvSize(0, 0) : VALUE_TO_CVSIZE(min_size_val);
    VALUE max_size_val = LOOKUP_CVMETHOD(options, "max_size");
    max_size = NIL_P(max_size_val) ? cvSize(0, 0) : VALUE_TO_CVSIZE(max_size_val);
    storage_val = CHECK_CVMEMSTORAGE(LOOKUP_CVMETHOD(options, "storage"));
  }

  VALUE result = Qnil;
  try {
    IplImage *ipl = IPLIMAGE_WITH_CHECK(image);
    CvSeq *seq = cvHaarDetectObjects(ipl, CVHAARCLASSIFIERCASCADE(self), CVMEMSTORAGE(storage_val),
			      scale_factor, min_neighbors, flags, min_size, max_size);
    // Wrap the raw result as a Ruby CvSeq of CvAvgComp, rooted in storage_val.
    result = cCvSeq::new_sequence(cCvSeq::rb_class(), seq, cCvAvgComp::rb_class(), storage_val);
    if (rb_block_given_p()) {
      // Yield each detected region to the caller's block.
      for(int i = 0; i < seq->total; ++i)
	rb_yield(REFER_OBJECT(cCvAvgComp::rb_class(), cvGetSeqElem(seq, i), storage_val));
    }
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return result;
}
Exemplo n.º 29
0
/*
 * call-seq:
 *   codes -> array(contain fixnum)
 *
 * Return Freeman chain codes.
 */
VALUE
rb_codes(VALUE self)
{
  CvChain *chain = CVCHAIN(self);
  CvChainPtReader reader;
  int total = chain->total;
  VALUE ary = rb_ary_new2(total);
  try {
    // Walk the chain with OpenCV's chain reader; each CV_READ_SEQ_ELEM step
    // copies the current Freeman code into reader.code and advances the
    // reader (the cast reuses the CvSeqReader embedded in CvChainPtReader).
    cvStartReadChainPoints(chain, &reader);
    for (int i = 0; i < total; ++i) {
      CV_READ_SEQ_ELEM(reader.code, (*((CvSeqReader*)&(reader))));
      rb_ary_store(ary, i, CHR2FIX(reader.code));
    }
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return ary;
}
Exemplo n.º 30
0
/*
 * call-seq:
 *   query -> IplImage or nil
 *
 * Grabs and returns a frame camera or file. Just a combination of grab and retrieve in one call.
 */
VALUE
rb_query(VALUE self)
{
  VALUE image = Qnil;
  try {
    IplImage *frame = cvQueryFrame(CVCAPTURE(self));
    if (frame == NULL)
      return Qnil;
    // Copy the driver-owned frame buffer into a fresh IplImage object.
    CvSize frame_size = cvSize(frame->width, frame->height);
    image = cIplImage::new_object(frame_size, CV_MAKETYPE(CV_8U, frame->nChannels));
    // Frames with a bottom-left origin are flipped into top-left orientation.
    if (frame->origin == IPL_ORIGIN_TL)
      cvCopy(frame, CVARR(image));
    else
      cvFlip(frame, CVARR(image));
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return image;
}