// Applies the affine pose (rotation phi, anisotropic scale lambda1/lambda2,
// then a further rotation of theta - phi) to the ROI of src and writes the
// centered half-size patch into dst. All intermediate work is done in 32f.
void AffineTransformPatch(IplImage* src, IplImage* dst, CvAffinePose pose)
{
    CvRect src_large_roi = cvGetImageROI(src);

    // Float working copies sized to the source ROI.
    IplImage* temp = cvCreateImage(cvSize(src_large_roi.width, src_large_roi.height), IPL_DEPTH_32F, src->nChannels);
    cvSetZero(temp);
    IplImage* temp2 = cvCloneImage(temp);
    CvMat* rotation_phi = cvCreateMat(2, 3, CV_32FC1);

    // Target size after the anisotropic scaling step.
    CvSize new_size = cvSize(temp->width*pose.lambda1, temp->height*pose.lambda2);
    IplImage* temp3 = cvCreateImage(new_size, IPL_DEPTH_32F, src->nChannels);

    cvConvertScale(src, temp);
    cvResetImageROI(temp);

    // 1) Rotate by phi about the patch center.
    cv2DRotationMatrix(cvPoint2D32f(temp->width/2, temp->height/2), pose.phi, 1.0, rotation_phi);
    cvWarpAffine(temp, temp2, rotation_phi);
    cvSetZero(temp);

    // 2) Scale by (lambda1, lambda2).
    cvResize(temp2, temp3);

    // 3) Rotate by (theta - phi) so the net rotation is theta.
    cv2DRotationMatrix(cvPoint2D32f(temp3->width/2, temp3->height/2), pose.theta - pose.phi, 1.0, rotation_phi);
    cvWarpAffine(temp3, temp, rotation_phi);

    // Extract the centered half-width/half-height patch, converting to dst's depth.
    cvSetImageROI(temp, cvRect(temp->width/2 - src_large_roi.width/4,
        temp->height/2 - src_large_roi.height/4,
        src_large_roi.width/2, src_large_roi.height/2));
    cvConvertScale(temp, dst);

    cvReleaseMat(&rotation_phi);
    cvReleaseImage(&temp3);
    cvReleaseImage(&temp2);
    cvReleaseImage(&temp);
}
// Draws the image's ROI into the destination rectangle of the given device
// context. Uses Show() for a 1:1 blit when the sizes already match; otherwise
// stretches via StretchDIBits with HALFTONE (shrinking, better quality) or
// COLORONCOLOR (enlarging, faster). Only 8-bit images are handled.
void CvvImage::DrawToHDC( HDC hDCDst, RECT* pDstRect )
{
    if( pDstRect && m_img && m_img->depth == IPL_DEPTH_8U && m_img->imageData )
    {
        // BITMAPINFO needs room for the header plus a 256-entry palette.
        uchar buffer[sizeof(BITMAPINFOHEADER) + 1024];
        BITMAPINFO* bmi = (BITMAPINFO*)buffer;
        int bmp_w = m_img->width, bmp_h = m_img->height;
        CvRect roi = cvGetImageROI( m_img );
        CvRect dst = RectToCvRect( *pDstRect );

        if( roi.width == dst.width && roi.height == dst.height )
        {
            // No scaling required - plain blit.
            Show( hDCDst, dst.x, dst.y, dst.width, dst.height, roi.x, roi.y );
            return;
        }
        if( roi.width > dst.width )
        {
            // Shrinking: HALFTONE averages source pixels.
            SetStretchBltMode(
                hDCDst,   // handle to device context
                HALFTONE );
        }
        else
        {
            // Enlarging: COLORONCOLOR is faster and adequate.
            SetStretchBltMode(
                hDCDst,   // handle to device context
                COLORONCOLOR );
        }
        FillBitmapInfo( bmi, bmp_w, bmp_h, Bpp(), m_img->origin );
        ::StretchDIBits(
            hDCDst,
            dst.x, dst.y, dst.width, dst.height,
            roi.x, roi.y, roi.width, roi.height,
            m_img->imageData, bmi, DIB_RGB_COLORS, SRCCOPY );
    }
}
// Мини-скриншот указаного участка изображения, с подсветкой трёх точек, и нанесением точек из вектора (если есть) void saveFragment(IplImage* src, CvRect roi, int x1, int y1, int x2, int y2, int x3, int y3, const char* fileName, vector<CvPoint>* edg = 0){ CvRect tempRoi = cvGetImageROI(src); cvSetImageROI(src, roi); IplImage* img = cvCreateImage(cvSize(roi.width, roi.height), src->depth, src->nChannels); //cvFillImage(img, -1); cvCopy(src, img, 0); if( edg ) { for( int i = 0; i< edg->size(); i++){ if( ((edg->at(i).x > roi.x)&&(edg->at(i).x < roi.x+roi.width)) && ((edg->at(i).y > roi.y)&&(edg->at(i).y < roi.y+roi.height)) ){ cvLine( img, cvPoint( edg->at(i).x - roi.x, edg->at(i).y - roi.y ), cvPoint( edg->at(i).x - roi.x, edg->at(i).y - roi.y ), CV_RGB(0,0,0) ); } } } cvCircle( img, cvPoint(x1-roi.x, y1-roi.y), 1, CV_RGB(0, 0, 255) ); cvCircle( img, cvPoint(x2-roi.x, y2-roi.y), 1, CV_RGB(0, 0, 255) ); cvCircle( img, cvPoint(x3-roi.x, y3-roi.y), 1, CV_RGB(0, 255, 255) ); cvSaveImage(fileName, img); cvSetImageROI(src, tempRoi); }
void cvSaveImageBlob(const char *filename, IplImage *img, CvBlob const *blob) { CvRect roi = cvGetImageROI(img); cvSetImageROItoBlob(img, blob); cvSaveImage(filename, img); cvSetImageROI(img, roi); }
// Returns a brightness-normalization factor for the requested frame,
// loading the frame into loadIm first if it is not already cached.
// Returns -1 when the frame cannot be loaded, 0 for _frame_none,
// the whole-image pixel sum for _frame_wholeImage, or the pixel sum over
// the analysis rectangle `ar` for _frame_excerptedRect.
int FrameLoader::getFrameNormFactor (int frameNumber, _frame_normalization_methodT fnm) {
    _TICTOC_TIC_FUNC;
    // Load only when the cached frame differs from the requested one.
    if (lastFrameLoaded != frameNumber) {
        if (loadWholeFrame(frameNumber) != 0){
            _TICTOC_TOC_FUNC;
            return -1;   // frame could not be loaded
        };
        lastFrameLoaded = frameNumber;
    }
    switch (fnm){
        case _frame_none:
            // No normalization requested.
            _TICTOC_TOC_FUNC;
            return 0;
            break;
        case _frame_wholeImage:
            // Sum of all pixel values over the whole image.
            _TICTOC_TOC_FUNC;
            return (int) (cvSum(loadIm).val[0]);
            break;
        case _frame_excerptedRect:
            // Sum over the analysis rectangle only; the image ROI is
            // saved and restored around the measurement.
            checkAr();
            CvRect roi = cvGetImageROI(loadIm);
            cvSetImageROI(loadIm, ar);
            int rv = (int) (cvSum(loadIm).val[0]);
            cvSetImageROI(loadIm, roi);
            _TICTOC_TOC_FUNC;
            return rv;
            break;
    }
    _TICTOC_TOC_FUNC;
    return 0; //should never reach this point
}
// Returns a newly allocated copy of the region `rect` of `src`.
// The caller owns the returned image; src's ROI is left untouched.
IplImage* createSubArray(IplImage *src, CvRect rect)
{
    CvRect savedRoi = cvGetImageROI(src);
    cvSetImageROI(src, rect);

    IplImage *sub = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);
    cvCopy(src, sub, NULL);

    cvSetImageROI(src, savedRoi);
    return sub;
}
// Shifts the image's ROI by (dx, dy); pixel data is not modified.
void translate_image(IplImage *img, int dx, int dy)
{
    CvRect shifted = cvGetImageROI(img);
    shifted.x += dx;
    shifted.y += dy;
    cvResetImageROI(img);
    cvSetImageROI(img, shifted);
}
void CvOneWayDescriptor::Initialize(int pose_count, IplImage* frontal, const char* feature_name, int norm) { m_feature_name = std::string(feature_name); CvRect roi = cvGetImageROI(frontal); m_center = rect_center(roi); Allocate(pose_count, cvSize(roi.width, roi.height), frontal->nChannels); GenerateSamples(pose_count, frontal, norm); }
// Multi-scale n-best descriptor search: tries every scale in
// [scale_min, scale_max) with multiplicative step scale_step, resizes the
// scaled patch ROI to the descriptor patch size, and keeps per output slot
// the best (distance, descriptor, pose, scale) over all scales.
// The patch's ROI is restored before returning.
void FindOneWayDescriptorEx(int desc_count, const CvOneWayDescriptor* descriptors, IplImage* patch,
                            float scale_min, float scale_max, float scale_step, int n,
                            std::vector<int>& desc_idxs, std::vector<int>& pose_idxs,
                            std::vector<float>& distances, std::vector<float>& scales,
                            CvMat* avg, CvMat* eigenvectors)
{
    CvSize patch_size = descriptors[0].GetPatchSize();
    IplImage* input_patch;
    CvRect roi;

    input_patch= cvCreateImage(patch_size, IPL_DEPTH_8U, 1);
    roi = cvGetImageROI((IplImage*)patch);

    // Scratch buffers holding one scale's search results.
    std::vector<int> _desc_idxs;
    _desc_idxs.resize(n);
    std::vector<int> _pose_idxs;
    _pose_idxs.resize(n);
    std::vector<float> _distances;
    _distances.resize(n);

    // Sentinel-initialize the running minima.
    for (int i=0;i<n;i++)
    {
        distances[i] = 1e10;
    }

    for(float cur_scale = scale_min; cur_scale < scale_max; cur_scale *= scale_step)
    {
        // Scale the ROI and resample it to the canonical descriptor size.
        CvRect roi_scaled = resize_rect(roi, cur_scale);
        cvSetImageROI(patch, roi_scaled);
        cvResize(patch, input_patch);

        FindOneWayDescriptor(desc_count, descriptors, input_patch, n,_desc_idxs, _pose_idxs, _distances, avg, eigenvectors);

        // Merge this scale's results into the per-slot minima.
        for (int i=0;i<n;i++)
        {
            if(_distances[i] < distances[i])
            {
                distances[i] = _distances[i];
                desc_idxs[i] = _desc_idxs[i];
                pose_idxs[i] = _pose_idxs[i];
                scales[i] = cur_scale;
            }
        }
    }

    // Restore the caller's ROI.
    cvSetImageROI((IplImage*)patch, roi);
    cvReleaseImage(&input_patch);
}
// Finds the stored pose sample closest (by cvNorm / L2) to the given patch.
// The patch is converted to 32f and scaled to unit sum before comparison.
// NOTE(review): if cvSum(patch) is 0 the 1/sum scale divides by zero -
// confirm callers guarantee non-degenerate patches.
void CvOneWayDescriptor::EstimatePose(IplImage* patch, int& pose_idx, float& distance) const
{
    distance = 1e10;
    pose_idx = -1;

    CvRect roi = cvGetImageROI(patch);
    // 32f copy normalized to unit sum (illumination invariance).
    IplImage* patch_32f = cvCreateImage(cvSize(roi.width, roi.height), IPL_DEPTH_32F, patch->nChannels);
    float sum = cvSum(patch).val[0];
    cvConvertScale(patch, patch_32f, 1/sum);

    for(int i = 0; i < m_pose_count; i++)
    {
        // Skip samples whose size does not match the input patch.
        if(m_samples[i]->width != patch_32f->width || m_samples[i]->height != patch_32f->height)
        {
            continue;
        }
        float dist = cvNorm(m_samples[i], patch_32f);

        if(dist < distance)
        {
            distance = dist;
            pose_idx = i;
        }

#if 0
        // Debug visualization of candidate sample vs. input patch.
        IplImage* img1 = cvCreateImage(cvSize(roi.width, roi.height), IPL_DEPTH_8U, 1);
        IplImage* img2 = cvCreateImage(cvSize(roi.width, roi.height), IPL_DEPTH_8U, 1);
        double maxval;
        cvMinMaxLoc(m_samples[i], 0, &maxval);
        cvConvertScale(m_samples[i], img1, 255.0/maxval);
        cvMinMaxLoc(patch_32f, 0, &maxval);
        cvConvertScale(patch_32f, img2, 255.0/maxval);

        cvNamedWindow("1", 1);
        cvShowImage("1", img1);
        cvNamedWindow("2", 1);
        cvShowImage("2", img2);
        printf("Distance = %f\n", dist);
        cvWaitKey(0);
#endif
    }

    cvReleaseImage(&patch_32f);
}
static double luck_diff(const IplImage *luck) { CvRect roi = cvGetImageROI(luck); double sum = 0; for (int i = 0; i < roi.height; ++i) for (int j = 0; j < roi.width; ++j) { CvScalar s = cvGet2D(luck, i, j); double t = 1-s.val[3]; sum += t*t; } return sum; }
/*
 * call-seq:
 *   set_roi(<i>rect</i>)
 *   set_roi(<i>rect</i>){|image| ...}
 *
 * Set ROI. <i>rect</i> should be CvRect or compatible object.
 * Return self.
 */
VALUE rb_set_roi(VALUE self, VALUE roi)
{
    VALUE block = rb_block_given_p() ? rb_block_proc() : 0;
    if (!block) {
        // No block: set the ROI permanently.
        cvSetImageROI(IPLIMAGE(self), VALUE_TO_CVRECT(roi));
    }
    else {
        // Block form: apply the ROI temporarily, yield, then restore.
        CvRect saved = cvGetImageROI(IPLIMAGE(self));
        cvSetImageROI(IPLIMAGE(self), VALUE_TO_CVRECT(roi));
        rb_yield_values(1, self);
        cvSetImageROI(IPLIMAGE(self), saved);
    }
    return self;
}
// Multi-scale single-best descriptor search: tries every scale in
// [scale_min, scale_max) with multiplicative step scale_step, resizes the
// scaled patch ROI to the descriptor patch size and keeps the overall best
// (descriptor, pose, distance, scale). The patch's ROI is restored on exit.
void FindOneWayDescriptorEx(int desc_count, const CvOneWayDescriptor* descriptors, IplImage* patch,
                            float scale_min, float scale_max, float scale_step,
                            int& desc_idx, int& pose_idx, float& distance, float& scale,
                            CvMat* avg, CvMat* eigenvectors)
{
    CvSize patch_size = descriptors[0].GetPatchSize();
    IplImage* input_patch;
    CvRect roi;

    input_patch= cvCreateImage(patch_size, IPL_DEPTH_8U, 1);
    roi = cvGetImageROI((IplImage*)patch);

    int _desc_idx, _pose_idx;
    float _distance;
    distance = 1e10;
    for(float cur_scale = scale_min; cur_scale < scale_max; cur_scale *= scale_step)
    {
        // Scale the ROI and resample to the canonical descriptor size.
        CvRect roi_scaled = resize_rect(roi, cur_scale);
        cvSetImageROI(patch, roi_scaled);
        cvResize(patch, input_patch);

#if 0
        // Debug visualization of the resampled patch.
        if(roi.x > 244 && roi.y < 200)
        {
            cvNamedWindow("1", 1);
            cvShowImage("1", input_patch);
            cvWaitKey(0);
        }
#endif

        FindOneWayDescriptor(desc_count, descriptors, input_patch, _desc_idx, _pose_idx, _distance, avg, eigenvectors);

        // Keep the best match over all scales.
        if(_distance < distance)
        {
            distance = _distance;
            desc_idx = _desc_idx;
            pose_idx = _pose_idx;
            scale = cur_scale;
        }
    }

    // Restore the caller's ROI.
    cvSetImageROI((IplImage*)patch, roi);
    cvReleaseImage(&input_patch);
}
// Derives thresholding parameters from the grayscale histogram of imgGray's
// ROI: iMinLevel/iMaxLevel are the lowest/highest intensities whose histogram
// count exceeds iHistMin, `step` spreads iNumLayers thresholds across that
// range (at least 1), and `power` is the dark-half pixel mass divided by
// twice the bright-half mass.
void ThresholdingParam(IplImage* imgGray, int iNumLayers, int& iMinLevel, int& iMaxLevel,
                       float& step, float& power, int iHistMin /*= HIST_MIN*/)
{
    _ASSERT(imgGray != NULL);
    _ASSERT(imgGray->nChannels == 1);

    int i, j;

    // create histogram
    int histImg[256] = {0};
    uchar* buffImg = (uchar*)imgGray->imageData;
    CvRect rROI = cvGetImageROI(imgGray);
    // Advance to the first pixel of the ROI.
    buffImg += rROI.y * imgGray->widthStep + rROI.x;

    for (j = 0; j < rROI.height; j++)
    {
        for (i = 0; i < rROI.width; i++)
        {
            histImg[buffImg[i]] ++;
        }
        buffImg += imgGray->widthStep;
    }

    // params
    // Lowest intensity with a significant pixel count.
    for (i = 0; i < 256; i++)
    {
        if (histImg[i] > iHistMin)
        {
            break;
        }
    }
    iMinLevel = i;

    // Highest intensity with a significant pixel count.
    for (i = 255; i >= 0; i--)
    {
        if (histImg[i] > iHistMin)
        {
            break;
        }
    }
    iMaxLevel = i;

    // Degenerate histogram: fall back to the full intensity range.
    if (iMaxLevel <= iMinLevel)
    {
        iMaxLevel = 255;
        iMinLevel = 0;
    }

    // power
    // Pixel mass below vs. above the mid-point of the usable range;
    // both seeded with 1 to avoid division by zero.
    double black = 1;
    double white = 1;
    for (i = iMinLevel; i < (iMinLevel + iMaxLevel) / 2; i++)
    {
        black += histImg[i];
    }
    for (i = (iMinLevel + iMaxLevel) / 2; i < iMaxLevel; i++)
    {
        white += histImg[i];
    }
    power = float(black) / float(2 * white);

    // step: range per layer, clamped to at least one intensity level.
    step = float(iMaxLevel - iMinLevel) / float(iNumLayers);
    if (step < 1.0)
    {
        step = 1.0;
    }
}// void ThresholdingParam(IplImage *imgGray, int iNumLayers, int &iMinLevel, int &iMaxLevel, int &iStep)
void CvOneWayDescriptor::InitializeFast(int pose_count, IplImage* frontal, const char* feature_name, CvMat* pca_hr_avg, CvMat* pca_hr_eigenvectors, CvOneWayDescriptor* pca_descriptors) { if(pca_hr_avg == 0) { Initialize(pose_count, frontal, feature_name, 1); return; } m_feature_name = std::string(feature_name); CvRect roi = cvGetImageROI(frontal); m_center = rect_center(roi); Allocate(pose_count, cvSize(roi.width, roi.height), frontal->nChannels); GenerateSamplesFast(frontal, pca_hr_avg, pca_hr_eigenvectors, pca_descriptors); }
// Assigns `another` to this image. Storage is reallocated only when the
// current buffer is missing or the dimensions differ; in that case the
// header is re-initialized from `another` and its ROI (if set) is copied
// over as well. Finally the pixel data is copied.
// NOTE(review): when the sizes already match, `another`'s ROI is NOT
// propagated to *this - confirm callers rely on that behavior.
CvImage& copy( const CvImage& another )
{
    if( !imageData || width != another.width || height != another.height )
    {
        cvReleaseData( this );
        cvResetImageROI( this );
        cvInitImageHeader( this, cvSize( another.width, another.height),
            another.depth, another.nChannels, another.origin, another.align );
        cvCreateImageData( this );

        if( another.roi )
            cvSetImageROI( this, cvGetImageROI( &another ));
    }
    cvCopy( (IplImage *)&another, this );
    return *this;
}
// Guesses the motion direction by comparing the pixel sums of the leftmost
// and rightmost 1px columns of the thresholded image. When the two edge sums
// differ by more than 25, the brighter edge decides the direction, mapped to
// IN/OUT via args->in_direction. Returns MATCH_DIR_UNKNOWN when the
// difference is too small to call. `inverted` flips IN/OUT for mirrored
// setups. The image's ROI is restored before returning.
match_direction_t catcierge_haar_guess_direction(catcierge_haar_matcher_t *ctx, IplImage *thr_img, int inverted)
{
    int left_sum;
    int right_sum;
    catcierge_haar_matcher_args_t *args;
    match_direction_t dir = MATCH_DIR_UNKNOWN;
    CvRect roi;

    // Fix: validate the context BEFORE dereferencing it (the asserts
    // previously ran after ctx->args had already been read).
    assert(ctx);
    assert(ctx->args);

    args = ctx->args;
    roi = cvGetImageROI(thr_img);

    // Left edge column.
    cvSetImageROI(thr_img, cvRect(0, 0, 1, roi.height));
    left_sum = (int)cvSum(thr_img).val[0];

    // Right edge column.
    cvSetImageROI(thr_img, cvRect(roi.width - 1, 0, 1, roi.height));
    right_sum = (int)cvSum(thr_img).val[0];

    // Only call a direction when the edges differ noticeably.
    if (abs(left_sum - right_sum) > 25)
    {
        if (ctx->super.debug) printf("Left: %d, Right: %d\n", left_sum, right_sum);

        if (right_sum > left_sum)
        {
            // Going right.
            dir = (args->in_direction == DIR_RIGHT) ? MATCH_DIR_IN : MATCH_DIR_OUT;
        }
        else
        {
            // Going left.
            dir = (args->in_direction == DIR_LEFT) ? MATCH_DIR_IN : MATCH_DIR_OUT;
        }
    }

    cvSetImageROI(thr_img, roi);

    if (inverted && (dir != MATCH_DIR_UNKNOWN))
    {
        if (dir == MATCH_DIR_IN)
            return MATCH_DIR_OUT;
        return MATCH_DIR_IN;
    }
    else
    {
        return dir;
    }
}
void CropResize::execute() { CVImage* cvimg = cvImageIn.getBuffer(); if(!cvimg) { std::cerr << getName() << "::execute()::ERROR::cvImageIn is NULL!...\n"; return; } CvRect* inputrect = rectIn.getBuffer(); if(inputrect) { m_inputrect = *inputrect; } else{ m_inputrect.x = 0; m_inputrect.y = 0; m_inputrect.width = cvimg->width; m_inputrect.height = cvimg->height; } if(m_inputrect.x < 0) m_inputrect.x = 0; if(m_inputrect.y < 0) m_inputrect.y = 0; if((m_inputrect.x+m_inputrect.width) >= cvimg->width) m_inputrect.width = cvimg->width - m_inputrect.x - 1; if((m_inputrect.y+m_inputrect.height) >= cvimg->height) m_inputrect.height = cvimg->height - m_inputrect.y - 1; CvSize* outputsize = sizeIn.getBuffer(); if(outputsize) { m_outputsize = *outputsize; } if(mp_cvimage) { if(mp_cvimage->width != outputsize->width || mp_cvimage->height != outputsize->height) { delete mp_cvimage; mp_cvimage = NULL; } } if(!mp_cvimage) { mp_cvimage = new CVImage(m_outputsize, cvimg->cvMatType, 0); cvImageOut.setBuffer(mp_cvimage); } //std::cout << getName() << "::execute()::mp_cvimage->width = " << mp_cvimage->width << ", mp_cvimage->height = " << mp_cvimage->height << "\n"; // cvSetZero(mp_cvimage->ipl); CvRect prevrect = cvGetImageROI(cvimg->ipl); cvSetImageROI(cvimg->ipl, m_inputrect); cvResize(cvimg->ipl, mp_cvimage->ipl, m_interpolation); cvSetImageROI(cvimg->ipl, prevrect); cvImageOut.out(); }
// 纯手工,性能比上面那个好 int sqrdiff(const IplImage *p1, const IplImage *p2) { CvRect roi = cvGetImageROI(p1); int v = p1->nChannels; int sum = 0; for (int i = 0; i < roi.height; ++i) for (int j = 0; j < roi.width; ++j) { CvScalar s1 = cvGet2D(p1, i, j); CvScalar s2 = cvGet2D(p2, i, j); for (int k = 0; k < v; ++k) { int t = s1.val[k]-s2.val[k]; sum += t*t; } } return sum; }
// Fills m_samples with pose_count warped mean patches of `frontal` at half
// the ROI resolution. When no precomputed transforms exist, a random affine
// pose is drawn per sample. With norm != 0 each sample is scaled to unit sum.
// NOTE(review): with norm set, a patch summing to 0 would divide by zero -
// confirm inputs are non-degenerate.
void CvOneWayDescriptor::GenerateSamples(int pose_count, IplImage* frontal, int norm)
{
    CvRect roi = cvGetImageROI(frontal);
    // Half-resolution working patch in the source depth.
    IplImage* patch_8u = cvCreateImage(cvSize(roi.width/2, roi.height/2), frontal->depth, frontal->nChannels);
    for(int i = 0; i < pose_count; i++)
    {
        if(!m_transforms)
        {
            m_affine_poses[i] = GenRandomAffinePose();
        }
        // Average several jittered warps rather than a single transform.
        generate_mean_patch(frontal, patch_8u, m_affine_poses[i], num_mean_components, noise_intensity);

        float scale = 1.0f;
        if(norm)
        {
            float sum = cvSum(patch_8u).val[0];
            scale = 1/sum;
        }
        cvConvertScale(patch_8u, m_samples[i], scale);

#if 0
        // Debug visualization of the generated sample.
        double maxval;
        cvMinMaxLoc(m_samples[i], 0, &maxval);
        IplImage* test = cvCreateImage(cvSize(roi.width/2, roi.height/2), IPL_DEPTH_8U, 1);
        cvConvertScale(m_samples[i], test, 255.0/maxval);
        cvNamedWindow("1", 1);
        cvShowImage("1", test);
        cvWaitKey(0);
#endif
    }

    cvReleaseImage(&patch_8u);
}
// Records one intermediate matcher image into result->steps.
// Any image left in the current slot from a previous run is released first;
// if `save` is zero only that cleanup (plus the optional debug display)
// happens. Only the ROI part of `img` is copied into the stored step image.
void catcierge_haar_matcher_save_step_image(catcierge_haar_matcher_t *ctx,
    IplImage *img, match_result_t *result,
    const char *name, const char *description, int save)
{
    match_step_t *step = NULL;
    CvSize img_size;
    CvRect roi;

    assert(result->step_img_count < MAX_STEPS);

    if (ctx->super.debug) cvShowImage(description, img);

    step = &result->steps[result->step_img_count];

    // Release any stale image left in this slot.
    if (step->img)
    {
        cvReleaseImage(&step->img);
        step->img = NULL;
    }

    // If saving steps is turned off, simply release
    // any previous step image.
    if (!save) return;

    // We only want to copy the Region Of Interest (ROI).
    roi = cvGetImageROI(img);
    img_size.width = roi.width;
    img_size.height = roi.height;

    step->img = cvCreateImage(img_size, 8, img->nChannels);
    cvCopy(img, step->img, NULL);
    step->name = name;
    step->description = description;

    result->step_img_count++;
}
// Multi-scale wrapper around the flann-tree FindOneWayDescriptor: tries each
// scale in [scale_min, scale_max) with multiplicative step scale_step,
// resizes the scaled patch ROI to `patch_size` and keeps the best
// (descriptor, pose, distance, scale). The patch's ROI is restored on exit.
void FindOneWayDescriptorEx(cv::flann::Index* m_pca_descriptors_tree, CvSize patch_size, int m_pca_dim_low,
                            int m_pose_count, IplImage* patch, float scale_min, float scale_max, float scale_step,
                            int& desc_idx, int& pose_idx, float& distance, float& scale,
                            CvMat* avg, CvMat* eigenvectors)
{
    IplImage* input_patch;
    CvRect roi;

    input_patch= cvCreateImage(patch_size, IPL_DEPTH_8U, 1);
    roi = cvGetImageROI((IplImage*)patch);

    int _desc_idx, _pose_idx;
    float _distance;
    distance = 1e10;
    for(float cur_scale = scale_min; cur_scale < scale_max; cur_scale *= scale_step)
    {
        // Scale the ROI and resample to the canonical descriptor size.
        CvRect roi_scaled = resize_rect(roi, cur_scale);
        cvSetImageROI(patch, roi_scaled);
        cvResize(patch, input_patch);

        FindOneWayDescriptor(m_pca_descriptors_tree, patch_size, m_pca_dim_low, m_pose_count,
            input_patch, _desc_idx, _pose_idx, _distance, avg, eigenvectors);

        // Keep the best match over all scales.
        if(_distance < distance)
        {
            distance = _distance;
            desc_idx = _desc_idx;
            pose_idx = _pose_idx;
            scale = cur_scale;
        }
    }

    // Restore the caller's ROI.
    cvSetImageROI((IplImage*)patch, roi);
    cvReleaseImage(&input_patch);
}
// Finds the nearest (descriptor, pose) pair for `patch` by projecting it onto
// the PCA basis and running a 1-NN search in the flann kd-tree built over the
// low-dimensional PCA coefficients. The tree's flat index encodes
// desc_idx * m_pose_count + pose_idx.
void FindOneWayDescriptor(cv::flann::Index* m_pca_descriptors_tree, CvSize patch_size, int m_pca_dim_low,
                          int m_pose_count, IplImage* patch, int& desc_idx, int& pose_idx, float& distance,
                          CvMat* avg, CvMat* eigenvectors)
{
    desc_idx = -1;
    pose_idx = -1;
    distance = 1e10;

    //--------
    //PCA_coeffs precalculating
    CvMat* pca_coeffs = cvCreateMat(1, m_pca_dim_low, CV_32FC1);
    int patch_width = patch_size.width;
    int patch_height = patch_size.height;

    // Bring the patch to the canonical descriptor size if needed.
    CvRect _roi = cvGetImageROI((IplImage*)patch);
    IplImage* test_img = cvCreateImage(cvSize(patch_width,patch_height), IPL_DEPTH_8U, 1);
    if(_roi.width != patch_width|| _roi.height != patch_height)
    {
        cvResize(patch, test_img);
        _roi = cvGetImageROI(test_img);
    }
    else
    {
        cvCopy(patch,test_img);
    }

    // Normalize to unit sum, then project onto the PCA basis.
    IplImage* patch_32f = cvCreateImage(cvSize(_roi.width, _roi.height), IPL_DEPTH_32F, 1);
    float sum = cvSum(test_img).val[0];
    cvConvertScale(test_img, patch_32f, 1.0f/sum);

    //Projecting PCA
    CvMat* patch_mat = ConvertImageToMatrix(patch_32f);
    CvMat* temp = cvCreateMat(1, eigenvectors->cols, CV_32FC1);
    cvProjectPCA(patch_mat, avg, eigenvectors, temp);
    // Keep only the first m_pca_dim_low coefficients.
    CvMat temp1;
    cvGetSubRect(temp, &temp1, cvRect(0, 0, pca_coeffs->cols, 1));
    cvCopy(&temp1, pca_coeffs);
    cvReleaseMat(&temp);
    cvReleaseMat(&patch_mat);
    //End of projecting

    cvReleaseImage(&patch_32f);
    cvReleaseImage(&test_img);
    //--------

    // 1-NN search in PCA coefficient space.
    cv::Mat m_object(1, m_pca_dim_low, CV_32F);
    cv::Mat m_indices(1, 1, CV_32S);
    cv::Mat m_dists(1, 1, CV_32F);

    float* object_ptr = m_object.ptr<float>(0);
    for (int i=0;i<m_pca_dim_low;i++)
    {
        object_ptr[i] = pca_coeffs->data.fl[i];
    }

    m_pca_descriptors_tree->knnSearch(m_object, m_indices, m_dists, 1, cv::flann::SearchParams(-1) );

    // Decode the flat tree index into (descriptor, pose).
    desc_idx = ((int*)(m_indices.ptr<int>(0)))[0] / m_pose_count;
    pose_idx = ((int*)(m_indices.ptr<int>(0)))[0] % m_pose_count;
    distance = ((float*)(m_dists.ptr<float>(0)))[0];

    cvReleaseMat(&pca_coeffs);
}
    // (tail of a train wrapper whose signature begins before this chunk)
    classifier->train(base_set, *rng, num_trees, depth, views, reduced_num_dim, num_quant_bits);
}

// Returns the number of classes the classifier was originally trained with.
int CvRTreeClassifierGetOriginalNumClasses(cv::RTreeClassifier* classifier)
{
    return classifier->original_num_classes();
}

// Returns the classifier's current number of classes.
int CvRTreeClassifierGetNumClasses(cv::RTreeClassifier* classifier)
{
    return classifier->classes();
}

// Computes the classifier signature for the patchSize x patchSize patch
// centered at `point`. Returns 1 on success, 0 when the patch does not fit
// inside the image; the image's ROI is restored in either case.
// NOTE: "Signiture"/"signiture" spelling is part of the exported C API name.
int CvRTreeClassifierGetSigniture(
    cv::RTreeClassifier* classifier,
    IplImage* image,
    CvPoint* point,
    int patchSize,
    float* signiture)
{
    // Rectangle centered on the requested point.
    CvRect roi = cvRect(point->x - (patchSize >> 1), point->y - (patchSize>>1), patchSize, patchSize);
    CvRect originalRoi = cvGetImageROI(image);

    cvSetImageROI(image, roi);
    // Re-read the ROI: if it no longer matches the requested size the patch
    // did not fit - presumably cvSetImageROI adjusted it; TODO confirm for
    // the OpenCV version in use.
    CvRect roi2 = cvGetImageROI(image);
    if (roi2.width != patchSize || roi2.height != patchSize)
    {
        cvSetImageROI(image, originalRoi);
        return 0;
    }

    IplImage* patch = cvCreateImage(cvSize(roi.width, roi.height), image->depth, image->nChannels);
    cvCopy(image, patch);

    classifier->getSignature(patch, signiture);

    cvReleaseImage(&patch);
    cvSetImageROI(image, originalRoi);
    return 1;
}
//-------------------------------------------------------------------------------- ofRectangle ofxCvImage::getROI() const { CvRect rect = cvGetImageROI(cvImage); return ofRectangle((float)rect.x, (float)rect.y, (float)rect.width, (float)rect.height); }
// C-interop wrapper: writes the image's current ROI into *rect.
void cveGetImageROI(IplImage* image, CvRect* rect)
{
    // Plain struct assignment; CvRect is POD, so this is equivalent to memcpy.
    *rect = cvGetImageROI(image);
}
//**
// n-best variant: fills desc_idxs/pose_idxs/distances with the n closest
// (descriptor, pose) matches for `patch`, maintained in ascending-distance
// order by insertion into the fixed-size result arrays. When `avg` is given,
// the patch is projected onto the PCA basis once and the per-descriptor pose
// estimate runs in coefficient space.
void FindOneWayDescriptor(int desc_count, const CvOneWayDescriptor* descriptors, IplImage* patch,
                          int n, std::vector<int>& desc_idxs, std::vector<int>& pose_idxs,
                          std::vector<float>& distances, CvMat* avg, CvMat* eigenvectors)
{
    // Sentinel-initialize all n result slots.
    for (int i=0;i<n;i++)
    {
        desc_idxs[i] = -1;
        pose_idxs[i] = -1;
        distances[i] = 1e10;
    }

    //--------
    //PCA_coeffs precalculating
    int m_pca_dim_low = descriptors[0].GetPCADimLow();
    CvMat* pca_coeffs = cvCreateMat(1, m_pca_dim_low, CV_32FC1);
    int patch_width = descriptors[0].GetPatchSize().width;
    int patch_height = descriptors[0].GetPatchSize().height;
    if (avg)
    {
        // Bring the patch to the canonical descriptor size if needed.
        CvRect _roi = cvGetImageROI((IplImage*)patch);
        IplImage* test_img = cvCreateImage(cvSize(patch_width,patch_height), IPL_DEPTH_8U, 1);
        if(_roi.width != patch_width|| _roi.height != patch_height)
        {
            cvResize(patch, test_img);
            _roi = cvGetImageROI(test_img);
        }
        else
        {
            cvCopy(patch,test_img);
        }

        // Normalize to unit sum, then project onto the PCA basis.
        IplImage* patch_32f = cvCreateImage(cvSize(_roi.width, _roi.height), IPL_DEPTH_32F, 1);
        float sum = cvSum(test_img).val[0];
        cvConvertScale(test_img, patch_32f, 1.0f/sum);

        //Projecting PCA
        CvMat* patch_mat = ConvertImageToMatrix(patch_32f);
        CvMat* temp = cvCreateMat(1, eigenvectors->cols, CV_32FC1);
        cvProjectPCA(patch_mat, avg, eigenvectors, temp);
        // Keep only the first m_pca_dim_low coefficients.
        CvMat temp1;
        cvGetSubRect(temp, &temp1, cvRect(0, 0, pca_coeffs->cols, 1));
        cvCopy(&temp1, pca_coeffs);
        cvReleaseMat(&temp);
        cvReleaseMat(&patch_mat);
        //End of projecting

        cvReleaseImage(&patch_32f);
        cvReleaseImage(&test_img);
    }
    //--------

    for(int i = 0; i < desc_count; i++)
    {
        int _pose_idx = -1;
        float _distance = 0;

#if 0
        descriptors[i].EstimatePose(patch, _pose_idx, _distance);
#else
        if (!avg)
        {
            descriptors[i].EstimatePosePCA(patch, _pose_idx, _distance, avg, eigenvectors);
        }
        else
        {
            descriptors[i].EstimatePosePCA(pca_coeffs, _pose_idx, _distance, avg, eigenvectors);
        }
#endif

        // Insert into the sorted n-best lists: shift worse entries down.
        for (int j=0;j<n;j++)
        {
            if(_distance < distances[j])
            {
                for (int k=(n-1);k > j;k--)
                {
                    desc_idxs[k] = desc_idxs[k-1];
                    pose_idxs[k] = pose_idxs[k-1];
                    distances[k] = distances[k-1];
                }
                desc_idxs[j] = i;
                pose_idxs[j] = _pose_idx;
                distances[j] = _distance;
                break;
            }
        }
    }

    cvReleaseMat(&pca_coeffs);
}
// Estimates the best-matching pose for `patch`, which may be either an
// IplImage (raw patch) or a CvMat of precomputed PCA coefficients.
// Without PCA data (avg == 0) it falls back to direct image comparison;
// otherwise the patch is normalized, projected onto the PCA basis, and
// compared to the stored per-pose coefficient vectors by cvNorm (L2).
void CvOneWayDescriptor::EstimatePosePCA(CvArr* patch, int& pose_idx, float& distance, CvMat* avg, CvMat* eigenvectors) const
{
    if(avg == 0)
    {
        // do not use pca
        if (!CV_IS_MAT(patch))
        {
            EstimatePose((IplImage*)patch, pose_idx, distance);
        }
        else
        {
            // CvMat input without PCA data is unsupported; silently ignored.
        }
        return;
    }

    CvRect roi;
    if (!CV_IS_MAT(patch))
    {
        // Bring the image patch to the canonical patch size.
        roi = cvGetImageROI((IplImage*)patch);
        if(roi.width != GetPatchSize().width || roi.height != GetPatchSize().height)
        {
            cvResize(patch, m_input_patch);
            patch = m_input_patch;
            roi = cvGetImageROI((IplImage*)patch);
        }
    }

    CvMat* pca_coeffs = cvCreateMat(1, m_pca_dim_low, CV_32FC1);

    if (CV_IS_MAT(patch))
    {
        // Coefficients were precomputed by the caller.
        cvCopy((CvMat*)patch, pca_coeffs);
    }
    else
    {
        // Normalize to unit sum, then project onto the PCA basis.
        IplImage* patch_32f = cvCreateImage(cvSize(roi.width, roi.height), IPL_DEPTH_32F, 1);
        float sum = cvSum(patch).val[0];
        cvConvertScale(patch, patch_32f, 1.0f/sum);
        ProjectPCASample(patch_32f, avg, eigenvectors, pca_coeffs);
        cvReleaseImage(&patch_32f);
    }

    distance = 1e10;
    pose_idx = -1;

    // Nearest pose by L2 distance in coefficient space.
    for(int i = 0; i < m_pose_count; i++)
    {
        float dist = cvNorm(m_pca_coeffs[i], pca_coeffs);
        if(dist < distance)
        {
            distance = dist;
            pose_idx = i;
        }
    }

    cvReleaseMat(&pca_coeffs);
}
/*
 * Get ROI as CvRect.
 */
VALUE rb_get_roi(VALUE self)
{
    CvRect roi = cvGetImageROI(IPLIMAGE(self));
    return cCvRect::new_object(roi);
}
// Builds the per-pose samples from a precomputed PCA decomposition:
// projects `frontal` onto the high-resolution PCA basis once, then
// reconstructs each pose as avg-pose-patch + sum_j(coeff_j * eigen-pose-patch_j),
// where pca_descriptors[0] holds the average patch and [j+1] the j-th
// eigenvector patch. Each reconstructed sample is normalized to unit sum.
void CvOneWayDescriptor::GenerateSamplesFast(IplImage* frontal, CvMat* pca_hr_avg,
    CvMat* pca_hr_eigenvectors, CvOneWayDescriptor* pca_descriptors)
{
    // Bring the input to the canonical input patch size if needed.
    CvRect roi = cvGetImageROI(frontal);
    if(roi.width != GetInputPatchSize().width || roi.height != GetInputPatchSize().height)
    {
        cvResize(frontal, m_train_patch);
        frontal = m_train_patch;
    }

    CvMat* pca_coeffs = cvCreateMat(1, pca_hr_eigenvectors->cols, CV_32FC1);
    double maxval;
    cvMinMaxLoc(frontal, 0, &maxval);   // NOTE(review): maxval is unused here - debug leftover?

    // Normalize the input to unit sum, then project onto the PCA basis.
    CvMat* frontal_data = ConvertImageToMatrix(frontal);
    float sum = cvSum(frontal_data).val[0];
    cvConvertScale(frontal_data, frontal_data, 1.0f/sum);
    cvProjectPCA(frontal_data, pca_hr_avg, pca_hr_eigenvectors, pca_coeffs);

    for(int i = 0; i < m_pose_count; i++)
    {
        // Accumulate the weighted eigenvector patches for this pose.
        cvSetZero(m_samples[i]);
        for(int j = 0; j < m_pca_dim_high; j++)
        {
            float coeff = cvmGet(pca_coeffs, 0, j);
            IplImage* patch = pca_descriptors[j + 1].GetPatch(i);
            cvAddWeighted(m_samples[i], 1.0, patch, coeff, 0, m_samples[i]);

#if 0
            printf("coeff%d = %f\n", j, coeff);
            IplImage* test = cvCreateImage(cvSize(12, 12), IPL_DEPTH_8U, 1);
            double maxval;
            cvMinMaxLoc(patch, 0, &maxval);
            cvConvertScale(patch, test, 255.0/maxval);
            cvNamedWindow("1", 1);
            cvShowImage("1", test);
            cvWaitKey(0);
#endif
        }

        // Add the mean patch, then normalize the sample to unit sum.
        cvAdd(pca_descriptors[0].GetPatch(i), m_samples[i], m_samples[i]);
        float sum = cvSum(m_samples[i]).val[0];
        cvConvertScale(m_samples[i], m_samples[i], 1.0/sum);

#if 0
        IplImage* test = cvCreateImage(cvSize(12, 12), IPL_DEPTH_8U, 1);
        cvMinMaxLoc(m_samples[i], 0, &maxval);
        cvConvertScale(m_samples[i], test, 255.0/maxval);
        cvNamedWindow("1", 1);
        cvShowImage("1", frontal);
        cvNamedWindow("2", 1);
        cvShowImage("2", test);
        cvWaitKey(0);
#endif
    }

    cvReleaseMat(&pca_coeffs);
    cvReleaseMat(&frontal_data);
}