/*
 * call-seq:
 *   last -> obj or nil
 *
 * Return the last sequence-block, or nil when the sequence is empty.
 */
VALUE
rb_last(VALUE self)
{
  CvSeq *seq = CVSEQ(self);
  if (seq->total <= 0)
    return Qnil;
  /* cvGetSeqElem(-1) addresses the final element. */
  return REFER_OBJECT(seqblock_class(seq), cvGetSeqElem(seq, -1), self);
}
/*
 * call-seq:
 *   [<i>index</i>] -> obj or nil
 *
 * Return sequence-block at <i>index</i>. Negative indices count from the
 * end of the sequence. Returns nil when the sequence is empty; raises
 * IndexError when <i>index</i> lies outside [-total, total).
 */
VALUE
rb_aref(VALUE self, VALUE index)
{
  CvSeq *seq = CVSEQ(self);
  int idx = NUM2INT(index);
  if (!(seq->total > 0))
    return Qnil;
  /* cvGetSeqElem resolves negative indices from the end, but returns NULL
   * for any index outside [-total, total). The original only rejected the
   * upper bound, so e.g. seq[-(total+1)] handed a NULL element pointer to
   * REFER_OBJECT. Reject both out-of-range directions up front. */
  if (idx >= seq->total || idx < -seq->total)
    rb_raise(rb_eIndexError, "index %d out of sequence", idx);
  return REFER_OBJECT(seqblock_class(seq), cvGetSeqElem(seq, idx), self);
}
/*
 * call-seq:
 *   each{|obj| ... } -> self
 *
 * Yield each sequence-block of <i>self</i> to the given block, in order.
 *   seq = CvSeq.new(CvIndex)
 *   seq.push(5, 6, 7)
 *   seq.each{|x| print x, " -- " }
 * produces:
 *   5 -- 6 -- 7 --
 */
VALUE
rb_each(VALUE self)
{
  CvSeq *seq = CVSEQ(self);
  if (seq->total <= 0)
    return self;
  /* Resolve the element class once; it is the same for every block. */
  VALUE klass = seqblock_class(seq);
  int count = seq->total;
  for (int i = 0; i < count; ++i)
    rb_yield(REFER_OBJECT(klass, cvGetSeqElem(seq, i), self));
  return self;
}
/*
 * call-seq:
 *   [<i>index</i>] -> obj or nil
 *
 * Return sequence-block at <i>index</i>.
 *
 * NOTE(review): this definition appears truncated in this chunk — the try
 * block closes with no visible catch clause or final return, and it
 * duplicates rb_aref defined earlier in the file. Confirm against the full
 * source before relying on this copy.
 */
VALUE rb_aref(VALUE self, VALUE index) {
  CvSeq *seq = CVSEQ(self);
  int idx = NUM2INT(index);
  // Empty sequence: nothing to fetch.
  if (seq->total == 0)
    return Qnil;
  // Only the upper bound is checked here; negative indices fall through to
  // cvGetSeqElem (which counts them from the end).
  if (idx >= seq->total)
    rb_raise(rb_eIndexError, "index %d out of sequence", idx);
  VALUE result = Qnil;
  try {
    // Fixnum-backed sequences store raw ints inline; any other element
    // class is wrapped via REFER_OBJECT around the element pointer.
    if (seqblock_class(seq) == rb_cFixnum)
      result = INT2NUM(*CV_GET_SEQ_ELEM(int, seq, idx));
    else
      result = REFER_OBJECT(seqblock_class(seq), cvGetSeqElem(seq, idx), self);
  }
/* * call-seq: * detect_objects(image[, options]) -> cvseq(include CvAvgComp object) * detect_objects(image[, options]){|cmp| ... } -> cvseq(include CvAvgComp object) * * Detects objects in the image. This method finds rectangular regions in the * given image that are likely to contain objects the cascade has been trained * for and returns those regions as a sequence of rectangles. * * * <i>options</i> should be a Hash including these keys. * :scale_factor (should be > 1.0) * The factor by which the search window is scaled between the subsequent scans; * 1.1 means increasing the window by 10%. * :storage * Memory storage to store the resultant sequence of the object candidate rectangles * :flags * Mode of operation. Currently the only flag that may be specified is CV_HAAR_DO_CANNY_PRUNING. * If it is set, the function uses a Canny edge detector to reject some image regions that contain * too few or too many edges and thus cannot contain the searched object. The particular threshold * values are tuned for face detection and in this case the pruning speeds up the processing * :min_neighbors * Minimum number (minus 1) of neighbor rectangles that makes up an object. * All the groups with fewer rectangles than min_neighbors - 1 are rejected. * If min_neighbors is 0, the function does not do any grouping at all and returns all the detected * candidate rectangles, which may be useful if the user wants to apply a customized grouping procedure. * :min_size * Minimum window size. By default, it is set to the size of samples the classifier has been * trained on (~20x20 for face detection). * :max_size * Maximum window size to use. By default, it is set to the size of the image. 
*/
VALUE
rb_detect_objects(int argc, VALUE *argv, VALUE self)
{
  VALUE image, options;
  rb_scan_args(argc, argv, "11", &image, &options);

  /* Defaults, overridden below when an options hash is supplied. */
  double scale = 1.1;
  int haar_flags = 0;
  int neighbors = 3;
  CvSize size_min = cvSize(0, 0);
  CvSize size_max = cvSize(0, 0);
  VALUE storage;

  if (NIL_P(options)) {
    storage = cCvMemStorage::new_object();
  }
  else {
    scale = IF_DBL(LOOKUP_CVMETHOD(options, "scale_factor"), 1.1);
    haar_flags = IF_INT(LOOKUP_CVMETHOD(options, "flags"), 0);
    neighbors = IF_INT(LOOKUP_CVMETHOD(options, "min_neighbors"), 3);
    VALUE opt_min = LOOKUP_CVMETHOD(options, "min_size");
    if (!NIL_P(opt_min))
      size_min = VALUE_TO_CVSIZE(opt_min);
    VALUE opt_max = LOOKUP_CVMETHOD(options, "max_size");
    if (!NIL_P(opt_max))
      size_max = VALUE_TO_CVSIZE(opt_max);
    storage = CHECK_CVMEMSTORAGE(LOOKUP_CVMETHOD(options, "storage"));
  }

  VALUE result = Qnil;
  try {
    IplImage *img = IPLIMAGE_WITH_CHECK(image);
    CvSeq *found = cvHaarDetectObjects(img, CVHAARCLASSIFIERCASCADE(self),
                                       CVMEMSTORAGE(storage), scale, neighbors,
                                       haar_flags, size_min, size_max);
    result = cCvSeq::new_sequence(cCvSeq::rb_class(), found,
                                  cCvAvgComp::rb_class(), storage);
    /* Optionally yield each detection (as CvAvgComp) to the caller's block. */
    if (rb_block_given_p()) {
      for (int i = 0; i < found->total; ++i)
        rb_yield(REFER_OBJECT(cCvAvgComp::rb_class(), cvGetSeqElem(found, i), storage));
    }
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return result;
}
/*
 * call-seq:
 *   detect_objects_with_pruning(image[,scale_factor = 1.1, min_neighbor = 3, min_size = CvSize.new(0,0)]) -> cvseq(include CvAvgComp object)
 *   detect_objects_with_pruning(image[,scale_factor = 1.1, min_neighbor = 3, min_size = CvSize.new(0,0)]){|cmp| ... } -> cvseq(include CvAvgComp object)
 *
 * Almost the same as #detect_objects (returns detected objects), but before
 * scanning the image a Canny edge detector rejects image regions that contain
 * too few or too many edges and thus cannot contain the searched object.
 *
 * note: The particular threshold values are tuned for face detection,
 * and in this case the pruning speeds up the processing.
 */
VALUE
rb_detect_objects_with_pruning(int argc, VALUE *argv, VALUE self)
{
  VALUE image, storage, scale_factor, min_neighbors, min_size;
  VALUE result = Qnil;
  rb_scan_args(argc, argv, "14", &image, &storage, &scale_factor, &min_neighbors, &min_size);
  if (!rb_obj_is_kind_of(image, cCvMat::rb_class()))
    rb_raise(rb_eTypeError, "argument 1(target-image) should be %s.",
             rb_class2name(cCvMat::rb_class()));
  double scale = IF_DBL(scale_factor, 1.1);
  if (!(scale > 1.0))
    rb_raise(rb_eArgError, "argument 2 (scale factor) must > 1.0.");
  storage = CHECK_CVMEMSTORAGE(storage);
  /* Translate OpenCV failures into Ruby exceptions, matching
   * rb_detect_objects; previously a cv::Exception thrown here would
   * propagate uncaught through the Ruby VM and abort the process. */
  try {
    CvSeq *seq = cvHaarDetectObjects(CVMAT(image), CVHAARCLASSIFIERCASCADE(self),
                                     CVMEMSTORAGE(storage), scale,
                                     IF_INT(min_neighbors, 3),
                                     CV_HAAR_DO_CANNY_PRUNING,
                                     NIL_P(min_size) ? cvSize(0, 0) : VALUE_TO_CVSIZE(min_size));
    result = cCvSeq::new_sequence(cCvSeq::rb_class(), seq, cCvAvgComp::rb_class(), storage);
    /* Optionally yield each detection (as CvAvgComp) to the caller's block. */
    if (rb_block_given_p()) {
      for (int i = 0; i < seq->total; i++)
        rb_yield(REFER_OBJECT(cCvAvgComp::rb_class(), cvGetSeqElem(seq, i), storage));
    }
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return result;
}
/*
 * call-seq:
 *   [<i>index</i>] -> obj or nil
 *
 * Return sequence-block at <i>index</i>.
 *
 * NOTE(review): this definition appears truncated in this chunk — the try
 * block closes with no visible catch clause or final return, and it is the
 * third rb_aref variant in this file. Confirm against the full source
 * before relying on this copy.
 */
VALUE rb_aref(VALUE self, VALUE index) {
  CvSeq *seq = CVSEQ(self);
  int idx = NUM2INT(index);
  // Empty sequence: nothing to fetch.
  if (seq->total == 0) {
    return Qnil;
  }
  // Only the upper bound is checked here; negative indices fall through to
  // cvGetSeqElem (which counts them from the end).
  if (idx >= seq->total) {
    rb_raise(rb_eIndexError, "index %d out of sequence", idx);
  }
  VALUE result = Qnil;
  try {
    VALUE klass = seqblock_class(seq);
    // Integer-backed sequences store raw ints inline; any other element
    // class is wrapped via REFER_OBJECT around the element pointer.
    if (RTEST(rb_class_inherited_p(klass, rb_cInteger))) {
      result = INT2NUM(*CV_GET_SEQ_ELEM(int, seq, idx));
    }
    else {
      result = REFER_OBJECT(klass, cvGetSeqElem(seq, idx), self);
    }
  }
/*
 * call-seq:
 *   center -> cvpoint2d32f
 *
 * Return the center point of the box as CvPoint2D32f.
 */
VALUE
rb_center(VALUE self)
{
  CvBox2D *box = CVBOX2D(self);
  return REFER_OBJECT(cCvPoint2D32f::rb_class(), &(box->center), self);
}
/*
 * call-seq:
 *   size -> cvsize2d32f
 *
 * Return the size of the box as CvSize2D32f.
 */
VALUE
rb_size(VALUE self)
{
  CvBox2D *box = CVBOX2D(self);
  return REFER_OBJECT(cCvSize2D32f::rb_class(), &(box->size), self);
}
/*
 * Return the position of the feature as CvPoint2D32f.
 *
 * @overload pt
 *   @return [CvPoint2D32f] Position of the feature.
 */
VALUE
rb_get_pt(VALUE self)
{
  CvSURFPoint *point = CVSURFPOINT(self);
  return REFER_OBJECT(cCvPoint2D32f::rb_class(), &(point->pt), self);
}
/*
 * Return the optional component boundary (contour) of the connected component.
 */
VALUE
rb_contour(VALUE self)
{
  CvConnectedComp *comp = CVCONNECTEDCOMP(self);
  return REFER_OBJECT(cCvContour::rb_class(), &(comp->contour), self);
}
/*
 * Return the ROI of the connected component as CvRect.
 */
VALUE
rb_rect(VALUE self)
{
  CvConnectedComp *comp = CVCONNECTEDCOMP(self);
  return REFER_OBJECT(cCvRect::rb_class(), &(comp->rect), self);
}
/*
 * Return the average color of the connected component as CvScalar.
 */
VALUE
rb_value(VALUE self)
{
  CvConnectedComp *comp = CVCONNECTEDCOMP(self);
  return REFER_OBJECT(cCvScalar::rb_class(), &(comp->value), self);
}
/*
 * Return the p2 point of the contour tree node as CvPoint.
 */
VALUE
rb_p2(VALUE self)
{
  CvContourTree *tree = CVCONTOURTREE(self);
  return REFER_OBJECT(cCvPoint::rb_class(), &(tree->p2), self);
}