/* Displays the training samples packed in a .vec file (as produced by
   opencv_createsamples) one at a time in a highgui window named "Sample".
   Any key advances to the next sample; Esc (key code 27) stops early.

   filename  - path to the .vec file
   winwidth  - expected sample width in pixels
   winheight - expected sample height in pixels
   scale     - display magnification; shown image is resized by this factor

   If winwidth*winheight does not match the vector size stored in the file,
   the function tries to guess a (width, height) pair whose product equals
   the stored vector size, preferring a near-square shape. */
void cvShowVecSamples( const char* filename, int winwidth, int winheight, double scale )
{
    CvVecFile file;
    short tmp;
    int i;
    CvMat* sample;

    tmp = 0;
    file.input = fopen( filename, "rb" );

    if( file.input != NULL )
    {
        /* .vec header: int count, int vecsize, then two unused shorts.
           NOTE(review): fread return values are unchecked - a truncated file
           leaves count/vecsize undefined; confirm callers pass valid files. */
        fread( &file.count, sizeof( file.count ), 1, file.input );
        fread( &file.vecsize, sizeof( file.vecsize ), 1, file.input );
        fread( &tmp, sizeof( tmp ), 1, file.input );
        fread( &tmp, sizeof( tmp ), 1, file.input );

        if( file.vecsize != winwidth * winheight )
        {
            int guessed_w = 0;
            int guessed_h = 0;

            fprintf( stderr, "Warning: specified sample width=%d and height=%d "
                "does not correspond to .vec file vector size=%d.\n",
                winwidth, winheight, file.vecsize );
            if( file.vecsize > 0 )
            {
                /* guess the most square-like factorization of vecsize */
                guessed_w = cvFloor( sqrt( (float) file.vecsize ) );
                if( guessed_w > 0 )
                {
                    guessed_h = file.vecsize / guessed_w;
                }
            }

            if( guessed_w <= 0 || guessed_h <= 0 || guessed_w * guessed_h != file.vecsize)
            {
                fprintf( stderr, "Error: failed to guess sample width and height\n" );
                fclose( file.input );
                return;
            }
            else
            {
                winwidth = guessed_w;
                winheight = guessed_h;
                fprintf( stderr, "Guessed width=%d, guessed height=%d\n",
                    winwidth, winheight );
            }
        }

        if( !feof( file.input ) && scale > 0 )
        {
            CvMat* scaled_sample = 0;

            file.last = 0;
            file.vector = (short*) cvAlloc( sizeof( *file.vector ) * file.vecsize );
            /* 'sample' and 'scaled_sample' alias the same matrix unless a
               rescale is requested, in which case a second matrix is made */
            sample = scaled_sample = cvCreateMat( winheight, winwidth, CV_8UC1 );
            if( scale != 1.0 )
            {
                scaled_sample = cvCreateMat( MAX( 1, cvCeil( scale * winheight ) ),
                                             MAX( 1, cvCeil( scale * winwidth ) ),
                                             CV_8UC1 );
            }
            cvNamedWindow( "Sample", CV_WINDOW_AUTOSIZE );
            for( i = 0; i < file.count; i++ )
            {
                /* unpacks the next stored vector into 'sample' */
                icvGetHaarTraininDataFromVecCallback( sample, &file );
                if( scale != 1.0 )
                    cvResize( sample, scaled_sample, CV_INTER_LINEAR);
                cvShowImage( "Sample", scaled_sample );
                if( cvWaitKey( 0 ) == 27 )  /* Esc aborts the preview */
                    break;
            }
            if( scaled_sample && scaled_sample != sample )
                cvReleaseMat( &scaled_sample );
            cvReleaseMat( &sample );
            cvFree( &file.vector );
        }
        fclose( file.input );
    }
}
void CvCalibFilter::Stop( bool calibrate ) { int i, j; isCalibrated = false; // deallocate undistortion maps for( i = 0; i < cameraCount; i++ ) { cvReleaseMat( &undistMap[i][0] ); cvReleaseMat( &undistMap[i][1] ); cvReleaseMat( &rectMap[i][0] ); cvReleaseMat( &rectMap[i][1] ); } if( calibrate && framesAccepted > 0 ) { int n = framesAccepted; CvPoint3D32f* buffer = (CvPoint3D32f*)cvAlloc( n * etalonPointCount * sizeof(buffer[0])); CvMat mat; float* rotMatr = (float*)cvAlloc( n * 9 * sizeof(rotMatr[0])); float* transVect = (float*)cvAlloc( n * 3 * sizeof(transVect[0])); int* counts = (int*)cvAlloc( n * sizeof(counts[0])); cvInitMatHeader( &mat, 1, sizeof(CvCamera)/sizeof(float), CV_32FC1, 0 ); memset( cameraParams, 0, cameraCount * sizeof(cameraParams[0])); for( i = 0; i < framesAccepted; i++ ) { counts[i] = etalonPointCount; for( j = 0; j < etalonPointCount; j++ ) buffer[i * etalonPointCount + j] = cvPoint3D32f( etalonPoints[j].x, etalonPoints[j].y, 0 ); } for( i = 0; i < cameraCount; i++ ) { cvCalibrateCamera( framesAccepted, counts, imgSize, points[i], buffer, cameraParams[i].distortion, cameraParams[i].matrix, transVect, rotMatr, 0 ); cameraParams[i].imgSize[0] = (float)imgSize.width; cameraParams[i].imgSize[1] = (float)imgSize.height; // cameraParams[i].focalLength[0] = cameraParams[i].matrix[0]; // cameraParams[i].focalLength[1] = cameraParams[i].matrix[4]; // cameraParams[i].principalPoint[0] = cameraParams[i].matrix[2]; // cameraParams[i].principalPoint[1] = cameraParams[i].matrix[5]; memcpy( cameraParams[i].rotMatr, rotMatr, 9 * sizeof(rotMatr[0])); memcpy( cameraParams[i].transVect, transVect, 3 * sizeof(transVect[0])); mat.data.ptr = (uchar*)(cameraParams + i); /* check resultant camera parameters: if there are some INF's or NAN's, stop and reset results */ if( !cvCheckArr( &mat, CV_CHECK_RANGE | CV_CHECK_QUIET, -10000, 10000 )) break; } isCalibrated = i == cameraCount; {/* calibrate stereo cameras */ if( cameraCount == 2 ) { stereo.camera[0] = 
&cameraParams[0]; stereo.camera[1] = &cameraParams[1]; icvStereoCalibration( framesAccepted, counts, imgSize, points[0],points[1], buffer, &stereo); for( i = 0; i < 9; i++ ) { stereo.fundMatr[i] = stereo.fundMatr[i]; } } } cvFree( &buffer ); cvFree( &counts ); cvFree( &rotMatr ); cvFree( &transVect ); } framesAccepted = 0; }
/* Fits an ellipse to a 2D point set given either as a CvSeq of points or as
   a point matrix, and returns it as a CvBox2D (center, full axis lengths,
   angle in degrees).  At least 5 points are required since a general conic
   has five degrees of freedom.  The live implementation delegates to
   icvFitEllipse_F; the disabled branch below preserves the direct
   least-squares algorithm contributed by Dr. Daniel Weiss for reference.

   Fix: the "too few points" message claimed ">= 6" while the guard below
   accepts any n >= 5; the message now matches the check. */
CV_IMPL CvBox2D
cvFitEllipse2( const CvArr* array )
{
    CvBox2D box;
    double* Ad = 0, *bd = 0;

    CV_FUNCNAME( "cvFitEllipse2" );

    memset( &box, 0, sizeof(box));

    __BEGIN__;

    CvContour contour_header;
    CvSeq* ptseq = 0;
    CvSeqBlock block;
    int n;

    if( CV_IS_SEQ( array ))
    {
        ptseq = (CvSeq*)array;
        if( !CV_IS_SEQ_POINT_SET( ptseq ))
            CV_ERROR( CV_StsBadArg, "Unsupported sequence type" );
    }
    else
    {
        CV_CALL( ptseq = cvPointSeqFromMat(
            CV_SEQ_KIND_GENERIC, array, &contour_header, &block ));
    }

    n = ptseq->total;
    if( n < 5 )
        CV_ERROR( CV_StsBadSize, "Number of points should be >= 5" );
#if 1
    icvFitEllipse_F( ptseq, &box );
#else
    /*
     * New fitellipse algorithm, contributed by Dr. Daniel Weiss
     */
    {
        double gfp[5], rp[5], t;
        CvMat A, b, x;
        const double min_eps = 1e-6;
        int i, is_float;
        CvSeqReader reader;

        CV_CALL( Ad = (double*)cvAlloc( n*5*sizeof(Ad[0]) ));
        CV_CALL( bd = (double*)cvAlloc( n*sizeof(bd[0]) ));

        // first fit for parameters A - E
        A = cvMat( n, 5, CV_64F, Ad );
        b = cvMat( n, 1, CV_64F, bd );
        x = cvMat( 5, 1, CV_64F, gfp );

        cvStartReadSeq( ptseq, &reader );
        is_float = CV_SEQ_ELTYPE(ptseq) == CV_32FC2;

        for( i = 0; i < n; i++ )
        {
            CvPoint2D32f p;
            if( is_float )
                p = *(CvPoint2D32f*)(reader.ptr);
            else
            {
                p.x = (float)((int*)reader.ptr)[0];
                p.y = (float)((int*)reader.ptr)[1];
            }
            CV_NEXT_SEQ_ELEM( sizeof(p), reader );

            bd[i] = 10000.0; // 1.0?
            Ad[i*5] = -(double)p.x * p.x; // A - C signs inverted as proposed by APP
            Ad[i*5 + 1] = -(double)p.y * p.y;
            Ad[i*5 + 2] = -(double)p.x * p.y;
            Ad[i*5 + 3] = p.x;
            Ad[i*5 + 4] = p.y;
        }

        cvSolve( &A, &b, &x, CV_SVD );

        // now use general-form parameters A - E to find the ellipse center:
        // differentiate general form wrt x/y to get two equations for cx and cy
        A = cvMat( 2, 2, CV_64F, Ad );
        b = cvMat( 2, 1, CV_64F, bd );
        x = cvMat( 2, 1, CV_64F, rp );
        Ad[0] = 2 * gfp[0];
        Ad[1] = Ad[2] = gfp[2];
        Ad[3] = 2 * gfp[1];
        bd[0] = gfp[3];
        bd[1] = gfp[4];
        cvSolve( &A, &b, &x, CV_SVD );

        // re-fit for parameters A - C with those center coordinates
        A = cvMat( n, 3, CV_64F, Ad );
        b = cvMat( n, 1, CV_64F, bd );
        x = cvMat( 3, 1, CV_64F, gfp );
        for( i = 0; i < n; i++ )
        {
            CvPoint2D32f p;
            if( is_float )
                p = *(CvPoint2D32f*)(reader.ptr);
            else
            {
                p.x = (float)((int*)reader.ptr)[0];
                p.y = (float)((int*)reader.ptr)[1];
            }
            CV_NEXT_SEQ_ELEM( sizeof(p), reader );
            bd[i] = 1.0;
            Ad[i * 3] = (p.x - rp[0]) * (p.x - rp[0]);
            Ad[i * 3 + 1] = (p.y - rp[1]) * (p.y - rp[1]);
            Ad[i * 3 + 2] = (p.x - rp[0]) * (p.y - rp[1]);
        }
        cvSolve(&A, &b, &x, CV_SVD);

        // store angle and radii
        rp[4] = -0.5 * atan2(gfp[2], gfp[1] - gfp[0]); // convert from APP angle usage
        t = sin(-2.0 * rp[4]);
        if( fabs(t) > fabs(gfp[2])*min_eps )
            t = gfp[2]/t;
        else
            t = gfp[1] - gfp[0];
        rp[2] = fabs(gfp[0] + gfp[1] - t);
        if( rp[2] > min_eps )
            rp[2] = sqrt(2.0 / rp[2]);
        rp[3] = fabs(gfp[0] + gfp[1] + t);
        if( rp[3] > min_eps )
            rp[3] = sqrt(2.0 / rp[3]);

        box.center.x = (float)rp[0];
        box.center.y = (float)rp[1];
        box.size.width = (float)(rp[2]*2);
        box.size.height = (float)(rp[3]*2);
        if( box.size.width > box.size.height )
        {
            float tmp;
            CV_SWAP( box.size.width, box.size.height, tmp );
            box.angle = (float)(90 + rp[4]*180/CV_PI);
        }
        if( box.angle < -180 )
            box.angle += 360;
        if( box.angle > 360 )
            box.angle -= 360;
    }
#endif
    __END__;

    cvFree( &Ad );
    cvFree( &bd );

    return box;
}
/* Creates a DMA capture context for IEEE-1394 (FireWire) camera 'index'.
   Picks the first entry of 'preferred_modes' the camera supports, sets up
   VIDEO1394 DMA capture (Format 7 handled separately), starts ISO
   transmission and verifies it is running.  Returns the new capture
   structure, or 0 on any failure.

   Fixes: the ERROR path used to leak the cvAlloc'd 'pcap' structure, and
   the ISO-status message read "Could get ISO status" (missing "not" and
   trailing newlines on two messages). */
static CvCaptureCAM_DC1394 * icvCaptureFromCAM_DC1394 (int index)
{
    quadlet_t modes[8], formats;
    int i;

    if (numPorts<0)
        icvInitCapture_DC1394();
    if (numPorts==0)
        return 0;     /* No i1394 ports found */
    if (numCameras<1)
        return 0;
    if (index>=numCameras)
        return 0;
    if (index<0)
        return 0;

    CvCaptureCAM_DC1394 * pcap = (CvCaptureCAM_DC1394*)cvAlloc(sizeof(*pcap));

    /* Select a port and camera (borrowed references - not owned by pcap) */
    pcap->device_name = videodev[cameras[index].portnum];
    pcap->handle = handles[cameras[index].portnum];
    pcap->camera = &cameras[index].cam;

    // get supported formats
    if (dc1394_query_supported_formats(pcap->handle, pcap->camera->node, &formats)<0)
    {
        fprintf(stderr,"%s:%d: Could not query supported formats\n",__FILE__,__LINE__);
        formats=0x0;
    }

    // query the supported modes of every supported format
    for (i=0; i < NUM_FORMATS; i++)
    {
        modes[i]=0;
        if (icvFormatSupportedCAM_DC1394(i+FORMAT_MIN, formats))
        {
            if (dc1394_query_supported_modes(pcap->handle, pcap->camera->node, i+FORMAT_MIN, &modes[i])<0)
            {
                fprintf(stderr,"%s:%d: Could not query Format%d modes\n",__FILE__,__LINE__,i);
            }
        }
    }

    pcap->format = 0;
    pcap->mode = 0;
    pcap->color_mode = 0;
    pcap->frame_rate = 0;

    int format_idx = -1;

    // scan the list of preferred modes, and find a supported one;
    // entries in FORMAT_MIN..FORMAT_MAX switch the current format,
    // other entries are modes of the current format
    for(i=0; (pcap->mode == 0) && (preferred_modes[i] != 0); i++)
    {
        if((preferred_modes[i] >= FORMAT_MIN) && (preferred_modes[i] <= FORMAT_MAX))
        {
            pcap->format = preferred_modes[i];
            format_idx = preferred_modes[i] - FORMAT_MIN;
            continue;
        }
        assert(format_idx != -1);
        if ( ! icvFormatSupportedCAM_DC1394(pcap->format, formats) )
            continue;
        if ( icvModeSupportedCAM_DC1394(pcap->format, preferred_modes[i], modes[format_idx]) )
        {
            pcap->mode = preferred_modes[i];
        }
    }
    if (pcap->mode == 0)
    {
        fprintf(stderr,"%s:%d: Could not find a supported mode for this camera\n",__FILE__,__LINE__);
        goto ERROR;
    }

    pcap->color_mode = icvColorMode( pcap->mode );
    if( pcap->color_mode == -1)
    {
        fprintf(stderr,"%s:%d: ERROR: BPP is Unsupported!!\n",__FILE__,__LINE__);
        goto ERROR;
    }

    // set frame rate to optimal value given format and mode
    pcap->frame_rate = icvGetBestFrameRate(pcap, pcap->format, pcap->mode);

    if (pcap->format!=FORMAT_SCALABLE_IMAGE_SIZE)
    { // everything except Format 7
        if (dc1394_dma_setup_capture(pcap->handle, pcap->camera->node, index+1 /*channel*/,
                    pcap->format, pcap->mode, SPEED_400,
                    pcap->frame_rate, NUM_BUFFERS,
#ifdef HAVE_DC1394_095
                    0 /*do_extra_buffering*/,
#endif
                    1 /*DROP_FRAMES*/,
                    pcap->device_name, pcap->camera) != DC1394_SUCCESS)
        {
            fprintf(stderr,"%s:%d: Failed to setup DMA capture with VIDEO1394\n",__FILE__,__LINE__);
            goto ERROR;
        }
    }
    else
    {
        if(dc1394_dma_setup_format7_capture(pcap->handle,pcap->camera->node,index+1 /*channel*/,
                    pcap->mode, SPEED_400, QUERY_FROM_CAMERA,
                    (unsigned int)QUERY_FROM_CAMERA, (unsigned int)QUERY_FROM_CAMERA,
                    (unsigned int)QUERY_FROM_CAMERA, (unsigned int)QUERY_FROM_CAMERA,
                    NUM_BUFFERS,
#ifdef HAVE_DC1394_095
                    0 /*do_extra_buffering*/,
#endif
                    1 /*DROP_FRAMES*/,
                    pcap->device_name, pcap->camera) != DC1394_SUCCESS)
        {
            fprintf(stderr,"%s:%d: Failed to setup DMA capture with VIDEO1394\n",__FILE__,__LINE__);
            goto ERROR;
        }
    }

    if (dc1394_start_iso_transmission(pcap->handle, pcap->camera->node)!=DC1394_SUCCESS)
    {
        fprintf(stderr,"%s:%d: Could not start ISO transmission\n",__FILE__,__LINE__);
        goto ERROR;
    }

    // give the camera a moment, then verify ISO transmission actually runs
    usleep(DELAY);

    dc1394bool_t status;
    if (dc1394_get_iso_status(pcap->handle, pcap->camera->node, &status)!=DC1394_SUCCESS)
    {
        fprintf(stderr,"%s:%d: Could not get ISO status\n",__FILE__,__LINE__);
        goto ERROR;
    }
    if (status==DC1394_FALSE)
    {
        fprintf(stderr,"%s:%d: ISO transmission refuses to start\n",__FILE__,__LINE__);
        goto ERROR;
    }

    // convert camera image to RGB by default
    pcap->convert=1;

    // no image data allocated yet
    pcap->buffer_is_writeable = 0;
    memset(&(pcap->frame), 0, sizeof(IplImage));
    icvResizeFrame( pcap );
    return pcap;

ERROR:
    /* NOTE(review): if DMA setup already succeeded, the DMA resources are
       not released here - confirm whether dc1394_dma_release_camera should
       be called on the late failure paths. */
    cvFree( &pcap );   // was leaked on every failure path before
    return 0;
};
// Tutorial: KLT (Kanade-Lucas-Tomasi) point tracking on a video using
// ViSP + OpenCV.  Corners are detected on the first frame and tracked frame
// by frame; at frame 25 detection is re-run and the previous features that
// are not redundant with the newly detected ones are re-added to the tracker.
int main()
{
#ifdef VISP_HAVE_OPENCV
  try {
    vpVideoReader reader;
    reader.setFileName("video-postcard.mpeg");

    vpImage<unsigned char> I;
    reader.acquire(I);

    // OpenCV image wrapper: IplImage for OpenCV < 2.4.8, cv::Mat otherwise
#if (VISP_HAVE_OPENCV_VERSION < 0x020408)
    IplImage * cvI = NULL;
#else
    cv::Mat cvI;
#endif
    vpImageConvert::convert(I, cvI);

    // Display initialisation
    vpDisplayOpenCV d(I, 0, 0, "Klt tracking");
    vpDisplay::display(I);
    vpDisplay::flush(I);

    vpKltOpencv tracker;
    // Set tracker parameters
    tracker.setMaxFeatures(200);
    tracker.setWindowSize(10);
    tracker.setQuality(0.01);
    tracker.setMinDistance(15);
    tracker.setHarrisFreeParameter(0.04);
    tracker.setBlockSize(9);
    tracker.setUseHarris(1);
    tracker.setPyramidLevels(3);

    // Initialise the tracking
    tracker.initTracking(cvI);

    while ( ! reader.end() ) {
      reader.acquire(I);
      std::cout << "acquire image " << reader.getFrameIndex() << std::endl;
      vpDisplay::display(I);

      vpImageConvert::convert(I, cvI);

      //! [Re-init tracker]
      // Restart the initialization to detect new keypoints
      if (reader.getFrameIndex() == 25) {
        std::cout << "Re initialize the tracker" << std::endl;
#if (VISP_HAVE_OPENCV_VERSION >= 0x020408)
        // Save of previous features
        std::vector<cv::Point2f> prev_features = tracker.getFeatures();

        // Start a new feature detection
        tracker.initTracking(cvI);
        std::vector<cv::Point2f> new_features = tracker.getFeatures();

        // Add previous features if they are not too close to a detected one
        double distance, minDistance_ = tracker.getMinDistance();
        bool is_redundant;
        for (size_t i=0; i < prev_features.size(); i++) {
          // Test if a previous feature is not redundant with one of the newly detected
          is_redundant = false;
          for (size_t j=0; j < new_features.size(); j++){
            distance = sqrt(vpMath::sqr(new_features[j].x-prev_features[i].x)
                            + vpMath::sqr(new_features[j].y-prev_features[i].y));
            if(distance < minDistance_){
              is_redundant = true;
              break;
            }
          }
          if(is_redundant){
            continue;
          }
          //std::cout << "Add previous feature with index " << i << std::endl;
          tracker.addFeature(prev_features[i]);
        }
#else
        // Save of previous features (pre-2.4.8 C API: query one by one)
        int prev_nfeatures = tracker.getNbFeatures();
        float x,y;
        long id;
        int j=0;

        CvPoint2D32f *prev_features = (CvPoint2D32f*)cvAlloc(prev_nfeatures*sizeof(CvPoint2D32f));

        for (int i=0; i <prev_nfeatures ; i ++) {
          tracker.getFeature(i, id, x, y);
          prev_features[i].x=x;
          prev_features[i].y=y;
          //printf("prev feature %d: id %d coord: %g %g\n", i, id, x, y);
        }

        // Start a new feature detection
        tracker.initTracking(cvI);
        std::cout << "Detection of " << tracker.getNbFeatures() << " new features" << std::endl;

        // Add previous features if they are not too close to a detected one;
        // 'i' indexes the slot where a kept feature is inserted, 'j' walks
        // the saved previous features
        double distance, minDistance_ = tracker.getMinDistance();
        for(int i = tracker.getNbFeatures() ;
            j<prev_nfeatures && i<tracker.getMaxFeatures() ; j++){
          // Test if a previous feature is not redundant with one of the newly detected
          bool is_redundant = false;
          for(int k=0; k<tracker.getNbFeatures(); k++){
            tracker.getFeature(k,id,x,y);
            //printf("curr feature %d: id %d coord: %g %g\n", k, id, x, y);
            distance = sqrt(vpMath::sqr(x-prev_features[j].x)
                            + vpMath::sqr(y-prev_features[j].y));
            if(distance < minDistance_){
              is_redundant = true;
              break;
            }
          }
          if(is_redundant){
            continue;
          }
          //std::cout << "Add previous feature with index " << i << std::endl;
          tracker.addFeature(i, prev_features[j].x, prev_features[j].y);
          i++;
        }
        cvFree(&prev_features);
#endif
      }
      // Track the features
      tracker.track(cvI);
      //! [Re-init tracker]

      std::cout << "tracking of " << tracker.getNbFeatures() << " features" << std::endl;

      tracker.display(I, vpColor::red);
      vpDisplay::flush(I);
    }
    // wait for a user click before quitting
    vpDisplay::getClick(I);

#if (VISP_HAVE_OPENCV_VERSION < 0x020408)
    cvReleaseImage(&cvI);
#endif
    return 0;
  }
  catch(vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
  }
#endif
}
// Splits the loaded samples into train/test parts according to 'spl'.
// The split is expressed either as an absolute count (CV_COUNT) or as a
// portion in (0, 1] (CV_PORTION).  Degenerate requests (count <= 0,
// portion ~0 or ~1) mean "all samples are training data", in which case any
// existing split indices are freed.  Otherwise a single shared index array
// is built whose prefix is viewed as the train index row and whose suffix
// as the test index row; optionally the indices are shuffled.
void CvMLData :: set_train_test_split( const CvTrainTestSplit * spl)
{
    CV_FUNCNAME( "CvMLData :: set_division" );
    __BEGIN__;

    int sample_count = 0;

    if ( spl->class_part )
        CV_ERROR( CV_StsBadArg, "this division type is not supported yet" );

    if ( !values )
        CV_ERROR( CV_StsInternal, "data is empty" );

    sample_count = values->rows;

    float train_sample_portion;

    if (spl->train_sample_part_mode == CV_COUNT)
    {
        train_sample_count = spl->train_sample_part.count;
        if (train_sample_count > sample_count)
            CV_ERROR( CV_StsBadArg, "train samples count is not correct" );
        // non-positive count means "use every sample for training"
        train_sample_count = train_sample_count<=0 ?
            sample_count : train_sample_count;
    }
    else // dtype.train_sample_part_mode == CV_PORTION
    {
        train_sample_portion = spl->train_sample_part.portion;
        if ( train_sample_portion > 1)
            CV_ERROR( CV_StsBadArg, "train samples count is not correct" );
        // portions indistinguishable from 0 or 1 fall back to "all training"
        train_sample_portion = train_sample_portion <= FLT_EPSILON ||
            1 - train_sample_portion <= FLT_EPSILON ? 1 : train_sample_portion;
        train_sample_count = cvFloor( train_sample_portion * sample_count );
    }

    if ( train_sample_count == sample_count )
    {
        // no test part requested - drop any previous split and return
        free_train_test_idx();
        return;
    }

    // a previously built split of a different size must be rebuilt
    if ( train_sample_idx && train_sample_idx->cols != train_sample_count )
        free_train_test_idx();

    if ( !sample_idx)
    {
        // one shared identity index array; train/test mat headers are just
        // non-owning views into its prefix and suffix respectively
        int test_sample_count = sample_count- train_sample_count;
        sample_idx = (int*)cvAlloc( sample_count * sizeof(sample_idx[0]) );
        for (int i = 0; i < sample_count; i++ )
            sample_idx[i] = i;
        train_sample_idx = cvCreateMatHeader( 1, train_sample_count, CV_32SC1 );
        test_sample_idx = cvCreateMatHeader( 1, test_sample_count, CV_32SC1 );
        *train_sample_idx = cvMat( 1, train_sample_count, CV_32SC1, &sample_idx[0] );
        *test_sample_idx = cvMat( 1, test_sample_count, CV_32SC1, &sample_idx[train_sample_count] );
    }

    mix = spl->mix;
    if ( mix )
        mix_train_and_test_idx();

    __END__;
}
// Configures the morphology filter for ERODE or DILATE with the given
// structuring element shape (RECT / CROSS / ELLIPSE / CUSTOM).  RECT
// elements use fast separable row+column min/max filters; every other
// shape uses a generic column filter driven by the sparse list of the
// element's non-zero positions (el_sparse).
void CvMorphology::init( int _operation, int _max_width, int _src_dst_type,
                         int _element_shape, CvMat* _element,
                         CvSize _ksize, CvPoint _anchor,
                         int _border_mode, CvScalar _border_value )
{
    CV_FUNCNAME( "CvMorphology::init" );
    __BEGIN__;

    int depth = CV_MAT_DEPTH(_src_dst_type);
    int el_type = 0, nz = -1;

    if( _operation != ERODE && _operation != DILATE )
        CV_ERROR( CV_StsBadArg, "Unknown/unsupported morphological operation" );

    if( _element_shape == CUSTOM )
    {
        if( !CV_IS_MAT(_element) )
            CV_ERROR( CV_StsBadArg,
            "structuring element should be valid matrix if CUSTOM element shape is specified" );

        el_type = CV_MAT_TYPE(_element->type);
        if( el_type != CV_8UC1 && el_type != CV_32SC1 )
            CV_ERROR( CV_StsUnsupportedFormat, "the structuring element must have 8uC1 or 32sC1 type" );

        _ksize = cvGetMatSize(_element);
        CV_CALL( nz = cvCountNonZero(_element));
        // a custom element with no zeros is equivalent to RECT, which
        // unlocks the much faster separable implementation
        if( nz == _ksize.width*_ksize.height )
            _element_shape = RECT;
    }

    operation = _operation;
    el_shape = _element_shape;

    CV_CALL( CvBaseImageFilter::init( _max_width, _src_dst_type, _src_dst_type,
        _element_shape == RECT, _ksize, _anchor, _border_mode, _border_value ));

    if( el_shape == RECT )
    {
        // separable path: pick row and column filter per depth
        if( operation == ERODE )
        {
            if( depth == CV_8U )
                x_func = (CvRowFilterFunc)icvErodeRectRow_8u,
                y_func = (CvColumnFilterFunc)icvErodeRectCol_8u;
            else if( depth == CV_16U )
                x_func = (CvRowFilterFunc)icvErodeRectRow_16u,
                y_func = (CvColumnFilterFunc)icvErodeRectCol_16u;
            else if( depth == CV_32F )
                x_func = (CvRowFilterFunc)icvErodeRectRow_32f,
                y_func = (CvColumnFilterFunc)icvErodeRectCol_32f;
        }
        else
        {
            assert( operation == DILATE );
            if( depth == CV_8U )
                x_func = (CvRowFilterFunc)icvDilateRectRow_8u,
                y_func = (CvColumnFilterFunc)icvDilateRectCol_8u;
            else if( depth == CV_16U )
                x_func = (CvRowFilterFunc)icvDilateRectRow_16u,
                y_func = (CvColumnFilterFunc)icvDilateRectCol_16u;
            else if( depth == CV_32F )
                x_func = (CvRowFilterFunc)icvDilateRectRow_32f,
                y_func = (CvColumnFilterFunc)icvDilateRectCol_32f;
        }
    }
    else
    {
        int i, j, k = 0;
        int cn = CV_MAT_CN(src_type);
        CvPoint* nz_loc;

        // (re)allocate the element matrix and the sparse position buffer
        // only if the previous ones cannot be reused
        if( !(element && el_sparse &&
            _ksize.width == element->cols && _ksize.height == element->rows) )
        {
            cvReleaseMat( &element );
            cvFree( &el_sparse );
            CV_CALL( element = cvCreateMat( _ksize.height, _ksize.width, CV_8UC1 ));
            CV_CALL( el_sparse = (uchar*)cvAlloc(
                ksize.width*ksize.height*(2*sizeof(int) + sizeof(uchar*))));
        }

        if( el_shape == CUSTOM )
        {
            CV_CALL( cvConvert( _element, element ));
        }
        else
        {
            CV_CALL( init_binary_element( element, el_shape, anchor ));
        }

        // generic path: only a column filter is needed
        if( operation == ERODE )
        {
            if( depth == CV_8U )
                y_func = (CvColumnFilterFunc)icvErodeAny_8u;
            else if( depth == CV_16U )
                y_func = (CvColumnFilterFunc)icvErodeAny_16u;
            else if( depth == CV_32F )
                y_func = (CvColumnFilterFunc)icvErodeAny_32f;
        }
        else
        {
            assert( operation == DILATE );
            if( depth == CV_8U )
                y_func = (CvColumnFilterFunc)icvDilateAny_8u;
            else if( depth == CV_16U )
                y_func = (CvColumnFilterFunc)icvDilateAny_16u;
            else if( depth == CV_32F )
                y_func = (CvColumnFilterFunc)icvDilateAny_32f;
        }

        // collect the coordinates of non-zero element entries (x is
        // pre-multiplied by the channel count) for the generic filter
        nz_loc = (CvPoint*)el_sparse;

        for( i = 0; i < ksize.height; i++ )
            for( j = 0; j < ksize.width; j++ )
            {
                if( element->data.ptr[i*element->step+j] )
                    nz_loc[k++] = cvPoint(j*cn,i);
            }
        // an all-zero element degenerates to a copy through the anchor
        if( k == 0 )
            nz_loc[k++] = cvPoint(anchor.x*cn,anchor.y);
        el_sparse_count = k;
    }

    if( depth == CV_32F && border_mode == IPL_BORDER_CONSTANT )
    {
        // NOTE(review): presumably this matches the toggled-int ordering
        // trick used by the 32f min/max filters for the constant border
        // values - confirm against the icv*Rect*_32f implementations.
        int i, cn = CV_MAT_CN(src_type);
        int* bt = (int*)border_tab;
        for( i = 0; i < cn; i++ )
            bt[i] = CV_TOGGLE_FLT(bt[i]);
    }

    __END__;
}
/* Create feature points on image and return number of them. Array points fills by found points */ int icvCreateFeaturePoints(IplImage *image, CvMat *points, CvMat *status) { int foundFeaturePoints = 0; IplImage *grayImage = 0; IplImage *eigImage = 0; IplImage *tmpImage = 0; CvPoint2D32f *cornerPoints = 0; CV_FUNCNAME( "icvFeatureCreatePoints" ); __BEGIN__; /* Test for errors */ if( image == 0 || points == 0 ) { CV_ERROR( CV_StsNullPtr, "Some of parameters is a NULL pointer" ); } /* Test image size */ int w,h; w = image->width; h = image->height; if( w <= 0 || h <= 0) { CV_ERROR( CV_StsOutOfRange, "Size of image must be > 0" ); } /* Test for matrices */ if( !CV_IS_MAT(points) ) { CV_ERROR( CV_StsUnsupportedFormat, "Input parameter points must be a matrix" ); } int needNumPoints; needNumPoints = points->cols; if( needNumPoints <= 0 ) { CV_ERROR( CV_StsOutOfRange, "Number of need points must be > 0" ); } if( points->rows != 2 ) { CV_ERROR( CV_StsOutOfRange, "Number of point coordinates must be == 2" ); } if( status != 0 ) { /* If status matrix exist test it for correct */ if( !CV_IS_MASK_ARR(status) ) { CV_ERROR( CV_StsUnsupportedFormat, "Statuses must be a mask arrays" ); } if( status->cols != needNumPoints ) { CV_ERROR( CV_StsUnmatchedSizes, "Size of points and statuses must be the same" ); } if( status->rows !=1 ) { CV_ERROR( CV_StsUnsupportedFormat, "Number of rows of status must be 1" ); } } /* Create temporary images */ CV_CALL( grayImage = cvCreateImage(cvSize(w,h), 8,1) ); CV_CALL( eigImage = cvCreateImage(cvSize(w,h),32,1) ); CV_CALL( tmpImage = cvCreateImage(cvSize(w,h),32,1) ); /* Create points */ CV_CALL( cornerPoints = (CvPoint2D32f*)cvAlloc( sizeof(CvPoint2D32f) * needNumPoints) ); int foundNum; double quality; double minDist; cvCvtColor(image,grayImage, CV_BGR2GRAY); foundNum = needNumPoints; quality = 0.01; minDist = 5; cvGoodFeaturesToTrack(grayImage, eigImage, tmpImage, cornerPoints, &foundNum, quality, minDist); /* Copy found points to result */ 
int i; for( i = 0; i < foundNum; i++ ) { cvmSet(points,0,i,cornerPoints[i].x); cvmSet(points,1,i,cornerPoints[i].y); } /* Set status if need */ if( status ) { for( i = 0; i < foundNum; i++ ) { status->data.ptr[i] = 1; } for( i = foundNum; i < needNumPoints; i++ ) { status->data.ptr[i] = 0; } } foundFeaturePoints = foundNum; __END__; /* Free allocated memory */ cvReleaseImage(&grayImage); cvReleaseImage(&eigImage); cvReleaseImage(&tmpImage); cvFree(&cornerPoints); return foundFeaturePoints; }
// Function cvCreateFGDStatModel initializes foreground detection process // parameters: // first_frame - frame from video sequence // parameters - (optional) if NULL default parameters of the algorithm will be used // p_model - pointer to CvFGDStatModel structure CV_IMPL CvBGStatModel* cvCreateFGDStatModel( IplImage* first_frame, CvFGDStatModelParams* parameters ) { CvFGDStatModel* p_model = 0; CV_FUNCNAME( "cvCreateFGDStatModel" ); __BEGIN__; int i, j, k, pixel_count, buf_size; CvFGDStatModelParams params; if( !CV_IS_IMAGE(first_frame) ) CV_ERROR( CV_StsBadArg, "Invalid or NULL first_frame parameter" ); if (first_frame->nChannels != 3) CV_ERROR( CV_StsBadArg, "first_frame must have 3 color channels" ); // Initialize parameters: if( parameters == NULL ) { params.Lc = CV_BGFG_FGD_LC; params.N1c = CV_BGFG_FGD_N1C; params.N2c = CV_BGFG_FGD_N2C; params.Lcc = CV_BGFG_FGD_LCC; params.N1cc = CV_BGFG_FGD_N1CC; params.N2cc = CV_BGFG_FGD_N2CC; params.delta = CV_BGFG_FGD_DELTA; params.alpha1 = CV_BGFG_FGD_ALPHA_1; params.alpha2 = CV_BGFG_FGD_ALPHA_2; params.alpha3 = CV_BGFG_FGD_ALPHA_3; params.T = CV_BGFG_FGD_T; params.minArea = CV_BGFG_FGD_MINAREA; params.is_obj_without_holes = 1; params.perform_morphing = 1; } else { params = *parameters; } CV_CALL( p_model = (CvFGDStatModel*)cvAlloc( sizeof(*p_model) )); memset( p_model, 0, sizeof(*p_model) ); p_model->type = CV_BG_MODEL_FGD; p_model->release = (CvReleaseBGStatModel)icvReleaseFGDStatModel; p_model->update = (CvUpdateBGStatModel)icvUpdateFGDStatModel;; p_model->params = params; // Initialize storage pools: pixel_count = first_frame->width * first_frame->height; buf_size = pixel_count*sizeof(p_model->pixel_stat[0]); CV_CALL( p_model->pixel_stat = (CvBGPixelStat*)cvAlloc(buf_size) ); memset( p_model->pixel_stat, 0, buf_size ); buf_size = pixel_count*params.N2c*sizeof(p_model->pixel_stat[0].ctable[0]); CV_CALL( p_model->pixel_stat[0].ctable = (CvBGPixelCStatTable*)cvAlloc(buf_size) ); memset( p_model->pixel_stat[0].ctable, 0, 
buf_size ); buf_size = pixel_count*params.N2cc*sizeof(p_model->pixel_stat[0].cctable[0]); CV_CALL( p_model->pixel_stat[0].cctable = (CvBGPixelCCStatTable*)cvAlloc(buf_size) ); memset( p_model->pixel_stat[0].cctable, 0, buf_size ); for( i = 0, k = 0; i < first_frame->height; i++ ) { for( j = 0; j < first_frame->width; j++, k++ ) { p_model->pixel_stat[k].ctable = p_model->pixel_stat[0].ctable + k*params.N2c; p_model->pixel_stat[k].cctable = p_model->pixel_stat[0].cctable + k*params.N2cc; } } // Init temporary images: CV_CALL( p_model->Ftd = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1)); CV_CALL( p_model->Fbd = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1)); CV_CALL( p_model->foreground = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1)); CV_CALL( p_model->background = cvCloneImage(first_frame)); CV_CALL( p_model->prev_frame = cvCloneImage(first_frame)); CV_CALL( p_model->storage = cvCreateMemStorage()); __END__; if( cvGetErrStatus() < 0 ) { CvBGStatModel* base_ptr = (CvBGStatModel*)p_model; if( p_model && p_model->release ) p_model->release( &base_ptr ); else cvFree( &p_model ); p_model = 0; } return (CvBGStatModel*)p_model; }
// Opens a CvFileStorage for reading or writing serialized data in XML, YAML
// or JSON, from/to a file (optionally gzip-compressed) or an in-memory
// buffer (CV_STORAGE_MEMORY).  'query' is the filename - possibly with
// ?-style parameters such as "base64" - or, for memory reading, the buffer
// itself.  Returns the opened storage, or 0/throws on failure.
//
// Fix: the NULL/empty-input error messages were swapped - the memory case
// reported "filename" and the file case "buffer".
CV_IMPL CvFileStorage*
cvOpenFileStorage( const char* query, CvMemStorage* dststorage, int flags, const char* encoding )
{
    CvFileStorage* fs = 0;
    int default_block_size = 1 << 18;
    bool append = (flags & 3) == CV_STORAGE_APPEND;
    bool mem = (flags & CV_STORAGE_MEMORY) != 0;
    bool write_mode = (flags & 3) != 0;
    bool write_base64 = (write_mode || append) && (flags & CV_STORAGE_BASE64) != 0;
    bool isGZ = false;
    size_t fnamelen = 0;
    const char * filename = query;

    // strip optional "?param" decorations from the file name
    std::vector<std::string> params;
    if ( !mem )
    {
        params = analyze_file_name( query );
        if ( !params.empty() )
            filename = params.begin()->c_str();

        if ( write_base64 == false && is_param_exist( params, "base64" ) )
            write_base64 = (write_mode || append);
    }

    if( !filename || filename[0] == '\0' )
    {
        if( !write_mode )
            CV_Error( CV_StsNullPtr, mem ? "NULL or empty buffer" : "NULL or empty filename" );
        // an empty name in write mode means "write to memory"
        mem = true;
    }
    else
        fnamelen = strlen(filename);

    if( mem && append )
        CV_Error( CV_StsBadFlag, "CV_STORAGE_APPEND and CV_STORAGE_MEMORY are not currently compatible" );

    fs = (CvFileStorage*)cvAlloc( sizeof(*fs) );
    CV_Assert(fs);
    memset( fs, 0, sizeof(*fs));

    fs->memstorage = cvCreateMemStorage( default_block_size );
    fs->dststorage = dststorage ? dststorage : fs->memstorage;

    fs->flags = CV_FILE_STORAGE;
    fs->write_mode = write_mode;

    if( !mem )
    {
        fs->filename = (char*)cvMemStorageAlloc( fs->memstorage, fnamelen+1 );
        strcpy( fs->filename, filename );

        // detect a ".gz" / ".Ngz" suffix (N = compression level digit)
        char* dot_pos = strrchr(fs->filename, '.');
        char compression = '\0';

        if( dot_pos && dot_pos[1] == 'g' && dot_pos[2] == 'z' &&
            (dot_pos[3] == '\0' || (cv_isdigit(dot_pos[3]) && dot_pos[4] == '\0')) )
        {
            if( append )
            {
                cvReleaseFileStorage( &fs );
                CV_Error(CV_StsNotImplemented, "Appending data to compressed file is not implemented" );
            }
            isGZ = true;
            compression = dot_pos[3];
            if( compression )
                dot_pos[3] = '\0', fnamelen--;
        }

        if( !isGZ )
        {
            fs->file = fopen(fs->filename, !fs->write_mode ? "rt" : !append ? "wt" : "a+t" );
            if( !fs->file )
                goto _exit_;
        }
        else
        {
#if USE_ZLIB
            char mode[] = { fs->write_mode ? 'w' : 'r', 'b', compression ? compression : '3', '\0' };
            fs->gzfile = gzopen(fs->filename, mode);
            if( !fs->gzfile )
                goto _exit_;
#else
            cvReleaseFileStorage( &fs );
            CV_Error(CV_StsNotImplemented, "There is no compressed file storage support in this configuration");
#endif
        }
    }

    fs->roots = 0;
    fs->struct_indent = 0;
    fs->struct_flags = 0;
    fs->wrap_margin = 71;

    if( fs->write_mode )
    {
        int fmt = flags & CV_STORAGE_FORMAT_MASK;

        if( mem )
            fs->outbuf = new std::deque<char>;

        // AUTO format: decide by the (possibly double) file extension
        if( fmt == CV_STORAGE_FORMAT_AUTO && filename )
        {
            const char* dot_pos = NULL;
            const char* dot_pos2 = NULL;
            // like strrchr() implementation, but save two last positions simultaneously
            for (const char* pos = filename; pos[0] != 0; pos++)
            {
                if (pos[0] == '.')
                {
                    dot_pos2 = dot_pos;
                    dot_pos = pos;
                }
            }
            // for "name.xml.gz" look at the extension before ".gz"
            if (cv_strcasecmp(dot_pos, ".gz") && dot_pos2 != NULL)
            {
                dot_pos = dot_pos2;
            }
            fs->fmt = (cv_strcasecmp(dot_pos, ".xml") || cv_strcasecmp(dot_pos, ".xml.gz"))
                ? CV_STORAGE_FORMAT_XML
                : (cv_strcasecmp(dot_pos, ".json") || cv_strcasecmp(dot_pos, ".json.gz"))
                    ? CV_STORAGE_FORMAT_JSON
                    : CV_STORAGE_FORMAT_YAML
                ;
        }
        else if ( fmt != CV_STORAGE_FORMAT_AUTO )
        {
            fs->fmt = fmt;
        }
        else
        {
            fs->fmt = CV_STORAGE_FORMAT_XML;
        }

        // we use factor=6 for XML (the longest characters (' and ") are
        // encoded with 6 bytes (&apos; and &quot;)) and factor=4 for YAML
        // (as we use 4 bytes for non-ASCII characters (e.g. \xAB))
        int buf_size = CV_FS_MAX_LEN*(fs->fmt == CV_STORAGE_FORMAT_XML ? 6 : 4) + 1024;

        // appending to an empty file is the same as fresh writing
        if (append)
        {
            fseek( fs->file, 0, SEEK_END );
            if (ftell(fs->file) == 0)
                append = false;
        }

        fs->write_stack = cvCreateSeq( 0, sizeof(CvSeq),
            fs->fmt == CV_STORAGE_FORMAT_XML ? sizeof(CvXMLStackRecord) : sizeof(int),
            fs->memstorage );
        fs->is_first = 1;
        fs->struct_indent = 0;
        fs->struct_flags = CV_NODE_EMPTY;
        fs->buffer_start = fs->buffer = (char*)cvAlloc( buf_size + 1024 );
        fs->buffer_end = fs->buffer_start + buf_size;

        fs->base64_writer           = 0;
        fs->is_default_using_base64 = write_base64;
        fs->state_of_writing_base64 = base64::fs::Uncertain;

        fs->is_write_struct_delayed = false;
        fs->delayed_struct_key      = 0;
        fs->delayed_struct_flags    = 0;
        fs->delayed_type_name       = 0;

        if( fs->fmt == CV_STORAGE_FORMAT_XML )
        {
            size_t file_size = fs->file ? (size_t)ftell( fs->file ) : (size_t)0;
            fs->strstorage = cvCreateChildMemStorage( fs->memstorage );
            if( !append || file_size == 0 )
            {
                if( encoding )
                {
                    if( strcmp( encoding, "UTF-16" ) == 0 ||
                        strcmp( encoding, "utf-16" ) == 0 ||
                        strcmp( encoding, "Utf-16" ) == 0 )
                    {
                        cvReleaseFileStorage( &fs );
                        CV_Error( CV_StsBadArg, "UTF-16 XML encoding is not supported! Use 8-bit encoding\n");
                    }

                    CV_Assert( strlen(encoding) < 1000 );
                    char buf[1100];
                    sprintf(buf, "<?xml version=\"1.0\" encoding=\"%s\"?>\n", encoding);
                    icvPuts( fs, buf );
                }
                else
                    icvPuts( fs, "<?xml version=\"1.0\"?>\n" );
                icvPuts( fs, "<opencv_storage>\n" );
            }
            else
            {
                // resume an existing XML document: locate and neutralize the
                // closing </opencv_storage> tag so new data can be appended
                int xml_buf_size = 1 << 10;
                char substr[] = "</opencv_storage>";
                int last_occurence = -1;
                xml_buf_size = MIN(xml_buf_size, int(file_size));
                fseek( fs->file, -xml_buf_size, SEEK_END );
                char* xml_buf = (char*)cvAlloc( xml_buf_size+2 );
                // find the last occurrence of </opencv_storage>
                for(;;)
                {
                    int line_offset = (int)ftell( fs->file );
                    char* ptr0 = icvGets( fs, xml_buf, xml_buf_size ), *ptr;
                    if( !ptr0 )
                        break;
                    ptr = ptr0;
                    for(;;)
                    {
                        ptr = strstr( ptr, substr );
                        if( !ptr )
                            break;
                        last_occurence = line_offset + (int)(ptr - ptr0);
                        ptr += strlen(substr);
                    }
                }
                cvFree( &xml_buf );

                if( last_occurence < 0 )
                {
                    cvReleaseFileStorage( &fs );
                    CV_Error( CV_StsError, "Could not find </opencv_storage> in the end of file.\n" );
                }
                icvCloseFile( fs );
                fs->file = fopen( fs->filename, "r+t" );
                CV_Assert(fs->file);
                fseek( fs->file, last_occurence, SEEK_SET );
                // replace the last "</opencv_storage>" with " <!-- resumed -->", which has the same length
                icvPuts( fs, " <!-- resumed -->" );
                fseek( fs->file, 0, SEEK_END );
                icvPuts( fs, "\n" );
            }

            fs->start_write_struct = icvXMLStartWriteStruct;
            fs->end_write_struct = icvXMLEndWriteStruct;
            fs->write_int = icvXMLWriteInt;
            fs->write_real = icvXMLWriteReal;
            fs->write_string = icvXMLWriteString;
            fs->write_comment = icvXMLWriteComment;
            fs->start_next_stream = icvXMLStartNextStream;
        }
        else if( fs->fmt == CV_STORAGE_FORMAT_YAML )
        {
            if( !append)
                icvPuts( fs, "%YAML:1.0\n---\n" );
            else
                icvPuts( fs, "...\n---\n" );

            fs->start_write_struct = icvYMLStartWriteStruct;
            fs->end_write_struct = icvYMLEndWriteStruct;
            fs->write_int = icvYMLWriteInt;
            fs->write_real = icvYMLWriteReal;
            fs->write_string = icvYMLWriteString;
            fs->write_comment = icvYMLWriteComment;
            fs->start_next_stream = icvYMLStartNextStream;
        }
        else
        {
            // JSON
            if( !append )
                icvPuts( fs, "{\n" );
            else
            {
                // scan backwards for the closing '}' of the existing document
                bool valid = false;
                long roffset = 0;
                for ( ;
                      fseek( fs->file, roffset, SEEK_END ) == 0;
                      roffset -= 1 )
                {
                    const char end_mark = '}';
                    if ( fgetc( fs->file ) == end_mark )
                    {
                        fseek( fs->file, roffset, SEEK_END );
                        valid = true;
                        break;
                    }
                }

                if ( valid )
                {
                    // overwrite the '}' with ',' so new members can follow
                    icvCloseFile( fs );
                    fs->file = fopen( fs->filename, "r+t" );
                    CV_Assert(fs->file);
                    fseek( fs->file, roffset, SEEK_END );
                    fputs( ",", fs->file );
                }
                else
                {
                    CV_Error( CV_StsError, "Could not find '}' in the end of file.\n" );
                }
            }
            fs->struct_indent = 4;
            fs->start_write_struct = icvJSONStartWriteStruct;
            fs->end_write_struct = icvJSONEndWriteStruct;
            fs->write_int = icvJSONWriteInt;
            fs->write_real = icvJSONWriteReal;
            fs->write_string = icvJSONWriteString;
            fs->write_comment = icvJSONWriteComment;
            fs->start_next_stream = icvJSONStartNextStream;
        }
    }
    else
    {
        // read mode: sniff the format from the first bytes, then parse the
        // whole document into fs->roots
        if( mem )
        {
            fs->strbuf = filename;
            fs->strbufsize = fnamelen;
        }

        size_t buf_size = 1 << 20;
        const char* yaml_signature = "%YAML";
        const char* json_signature = "{";
        const char* xml_signature  = "<?xml";
        char buf[16];
        icvGets( fs, buf, sizeof(buf)-2 );
        char* bufPtr = cv_skip_BOM(buf);
        size_t bufOffset = bufPtr - buf;
        if(strncmp( bufPtr, yaml_signature, strlen(yaml_signature) ) == 0)
            fs->fmt = CV_STORAGE_FORMAT_YAML;
        else if(strncmp( bufPtr, json_signature, strlen(json_signature) ) == 0)
            fs->fmt = CV_STORAGE_FORMAT_JSON;
        else if(strncmp( bufPtr, xml_signature, strlen(xml_signature) ) == 0)
            fs->fmt = CV_STORAGE_FORMAT_XML;
        else if(fs->strbufsize == bufOffset)
            CV_Error(CV_BADARG_ERR, "Input file is empty");
        else
            CV_Error(CV_BADARG_ERR, "Unsupported file storage format");

        if( !isGZ )
        {
            if( !mem )
            {
                fseek( fs->file, 0, SEEK_END );
                buf_size = ftell( fs->file );
            }
            else
                buf_size = fs->strbufsize;
            buf_size = MIN( buf_size, (size_t)(1 << 20) );
            buf_size = MAX( buf_size, (size_t)(CV_FS_MAX_LEN*2 + 1024) );
        }
        icvRewind(fs);
        fs->strbufpos = bufOffset;

        fs->str_hash = cvCreateMap( 0, sizeof(CvStringHash),
                        sizeof(CvStringHashNode), fs->memstorage, 256 );

        fs->roots = cvCreateSeq( 0, sizeof(CvSeq),
                        sizeof(CvFileNode), fs->memstorage );

        fs->buffer = fs->buffer_start = (char*)cvAlloc( buf_size + 256 );
        fs->buffer_end = fs->buffer_start + buf_size;
        fs->buffer[0] = '\n';
        fs->buffer[1] = '\0';

        //mode = cvGetErrMode();
        //cvSetErrMode( CV_ErrModeSilent );
        CV_TRY
        {
            switch (fs->fmt)
            {
            case CV_STORAGE_FORMAT_XML : { icvXMLParse ( fs ); break; }
            case CV_STORAGE_FORMAT_YAML: { icvYMLParse ( fs ); break; }
            case CV_STORAGE_FORMAT_JSON: { icvJSONParse( fs ); break; }
            default: break;
            }
        }
        CV_CATCH_ALL
        {
            // mark opened so release can clean up fully, then re-throw
            fs->is_opened = true;
            cvReleaseFileStorage( &fs );
            CV_RETHROW();
        }
        //cvSetErrMode( mode );

        // release resources that we do not need anymore
        cvFree( &fs->buffer_start );
        fs->buffer = fs->buffer_end = 0;
    }
    fs->is_opened = true;

_exit_:
    if( fs )
    {
        if( cvGetErrStatus() < 0 || (!fs->file && !fs->gzfile && !fs->outbuf && !fs->strbuf) )
        {
            cvReleaseFileStorage( &fs );
        }
        else if( !fs->write_mode )
        {
            icvCloseFile(fs);
            // we close the file since it's not needed anymore. But icvCloseFile() resets is_opened,
            // which may be misleading. So we restore the value of is_opened.
            fs->is_opened = true;
        }
    }

    return fs;
}
/* Finds, for every point marked visible in pntStatus1 on image1, the
 * corresponding point on image2 using pyramidal Lucas-Kanade optical flow,
 * optionally pruning outliers with a RANSAC-estimated fundamental matrix.
 *
 * points1/points2 are 2xN coordinate matrices; pntStatus1/pntStatus2 are
 * 1xN 8-bit visibility masks (points2/pntStatus2 are outputs).
 * Returns number of corresponding points. */
int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
                               IplImage *image2,/* Image 2 */
                               CvMat *points1,
                               CvMat *pntStatus1,
                               CvMat *points2,
                               CvMat *pntStatus2,
                               int useFilter,/*Use fundamental matrix to filter points */
                               double threshold)/* Threshold for good points in filter */
{
    int resNumCorrPoints = 0;
    CvPoint2D32f* cornerPoints1 = 0;
    CvPoint2D32f* cornerPoints2 = 0;
    char* status = 0;
    float* errors = 0;
    CvMat* tmpPoints1 = 0;
    CvMat* tmpPoints2 = 0;
    CvMat* pStatus = 0;
    IplImage *grayImage1 = 0;
    IplImage *grayImage2 = 0;
    IplImage *pyrImage1 = 0;
    IplImage *pyrImage2 = 0;

    CV_FUNCNAME( "icvFindCorrForGivenPoints" );
    __BEGIN__;

    /* Test input data for errors */

    /* Test for null pointers */
    if( image1     == 0 || image2     == 0 ||
        points1    == 0 || points2    == 0 ||
        pntStatus1 == 0 || pntStatus2 == 0)
    {
        CV_ERROR( CV_StsNullPtr, "Some of parameters is a NULL pointer" );
    }

    /* Test image size */
    int w,h;
    w = image1->width;
    h = image1->height;

    if( w <= 0 || h <= 0)
    {
        CV_ERROR( CV_StsOutOfRange, "Size of image1 must be > 0" );
    }

    if( image2->width != w || image2->height != h )
    {
        CV_ERROR( CV_StsUnmatchedSizes, "Size of images must be the same" );
    }

    /* Test for matrices */
    if( !CV_IS_MAT(points1)    || !CV_IS_MAT(points2) ||
        !CV_IS_MAT(pntStatus1) || !CV_IS_MAT(pntStatus2) )
    {
        CV_ERROR( CV_StsUnsupportedFormat, "Input parameters (points and status) must be a matrices" );
    }

    /* Test type of status matrices */
    if( !CV_IS_MASK_ARR(pntStatus1) || !CV_IS_MASK_ARR(pntStatus2) )
    {
        CV_ERROR( CV_StsUnsupportedFormat, "Statuses must be a mask arrays" );
    }

    /* Test number of points */
    int numPoints;
    numPoints = points1->cols;

    if( numPoints <= 0 )
    {
        CV_ERROR( CV_StsOutOfRange, "Number of points1 must be > 0" );
    }

    if( points2->cols != numPoints || pntStatus1->cols != numPoints || pntStatus2->cols != numPoints )
    {
        CV_ERROR( CV_StsUnmatchedSizes, "Number of points and statuses must be the same" );
    }

    if( points1->rows != 2 || points2->rows != 2 )
    {
        CV_ERROR( CV_StsOutOfRange, "Number of points coordinates must be 2" );
    }

    if( pntStatus1->rows != 1 || pntStatus2->rows != 1 )
    {
        CV_ERROR( CV_StsOutOfRange, "Status must be a matrix 1xN" );
    }
    /* ----- End test ----- */

    /* Compute number of visible points on image1 */
    int numVisPoints;
    numVisPoints = cvCountNonZero(pntStatus1);

    if( numVisPoints > 0 )
    {
        /* Create temporary images */

        /*
        CvvImage grayImage1;
        CvvImage grayImage2;
        CvvImage pyrImage1;
        CvvImage pyrImage2;
        */

        /* Create Ipl images */
        CV_CALL( grayImage1 = cvCreateImage(cvSize(w,h),8,1) );
        CV_CALL( grayImage2 = cvCreateImage(cvSize(w,h),8,1) );
        CV_CALL( pyrImage1  = cvCreateImage(cvSize(w,h),8,1) );
        CV_CALL( pyrImage2  = cvCreateImage(cvSize(w,h),8,1) );

        CV_CALL( cornerPoints1 = (CvPoint2D32f*)cvAlloc( sizeof(CvPoint2D32f)*numVisPoints) );
        CV_CALL( cornerPoints2 = (CvPoint2D32f*)cvAlloc( sizeof(CvPoint2D32f)*numVisPoints) );
        CV_CALL( status = (char*)cvAlloc( sizeof(char)*numVisPoints) );
        CV_CALL( errors = (float*)cvAlloc( 2 * sizeof(float)*numVisPoints) );

        int i;
        for( i = 0; i < numVisPoints; i++ )
        {
            status[i] = 1;
        }

        /* !!! Need test creation errors */
        /*
        if( !grayImage1.Create(w,h,8)) EXIT;
        if( !grayImage2.Create(w,h,8)) EXIT;
        if( !pyrImage1. Create(w,h,8)) EXIT;
        if( !pyrImage2. Create(w,h,8)) EXIT;
        */

        cvCvtColor(image1,grayImage1,CV_BGR2GRAY);
        cvCvtColor(image2,grayImage2,CV_BGR2GRAY);

        /*
        grayImage1.CopyOf(image1,0);
        grayImage2.CopyOf(image2,0);
        */

        /* Pack the coordinates of the visible points into a dense array
           for the LK tracker */
        uchar *stat1 = pntStatus1->data.ptr;
        uchar *stat2 = pntStatus2->data.ptr;

        int curr = 0;
        for( i = 0; i < numPoints; i++ )
        {
            if( stat1[i] )
            {
                cornerPoints1[curr].x = (float)cvmGet(points1,0,i);
                cornerPoints1[curr].y = (float)cvmGet(points1,1,i);
                curr++;
            }
        }

        /* Define number of levels of pyramid */
        cvCalcOpticalFlowPyrLK( grayImage1, grayImage2,
                                pyrImage1, pyrImage2,
                                cornerPoints1, cornerPoints2,
                                numVisPoints, cvSize(10,10), 3,
                                status, errors,
                                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03),
                                0/*CV_LKFLOW_PYR_A_READY*/ );

        memset(stat2,0,sizeof(uchar)*numPoints);

        int currVis = 0;
        int totalCorns = 0;

        /* Copy new points and set status */
        /* stat1 may not be the same as stat2 */
        for( i = 0; i < numPoints; i++ )
        {
            if( stat1[i] )
            {
                /* errors[] < 1000 discards badly-tracked points */
                if( status[currVis] && errors[currVis] < 1000 )
                {
                    stat2[i] = 1;
                    cvmSet(points2,0,i,cornerPoints2[currVis].x);
                    cvmSet(points2,1,i,cornerPoints2[currVis].y);
                    totalCorns++;
                }
                currVis++;
            }
        }

        resNumCorrPoints = totalCorns;

        /* Filter points using RANSAC */
        if( useFilter )
        {
            resNumCorrPoints = 0;
            /* Use RANSAC filter for found points; 8-point algorithm needs > 7 */
            if( totalCorns > 7 )
            {
                /* Create array with good points only */
                CV_CALL( tmpPoints1 = cvCreateMat(2,totalCorns,CV_64F) );
                CV_CALL( tmpPoints2 = cvCreateMat(2,totalCorns,CV_64F) );

                /* Copy just good points */
                int currPoint = 0;
                for( i = 0; i < numPoints; i++ )
                {
                    if( stat2[i] )
                    {
                        cvmSet(tmpPoints1,0,currPoint,cvmGet(points1,0,i));
                        cvmSet(tmpPoints1,1,currPoint,cvmGet(points1,1,i));
                        cvmSet(tmpPoints2,0,currPoint,cvmGet(points2,0,i));
                        cvmSet(tmpPoints2,1,currPoint,cvmGet(points2,1,i));
                        currPoint++;
                    }
                }

                /* Compute fundamental matrix */
                CvMat fundMatr;
                double fundMatr_dat[9];
                fundMatr = cvMat(3,3,CV_64F,fundMatr_dat);

                CV_CALL( pStatus = cvCreateMat(1,totalCorns,CV_32F) );
                int num = cvFindFundamentalMat(tmpPoints1,tmpPoints2,&fundMatr,CV_FM_RANSAC,threshold,0.99,pStatus);
                if( num > 0 )
                {
                    int curr = 0;
                    /* Set final status for points2: zero out RANSAC outliers */
                    for( i = 0; i < numPoints; i++ )
                    {
                        if( stat2[i] )
                        {
                            if( cvmGet(pStatus,0,curr) == 0 )
                            {
                                stat2[i] = 0;
                            }
                            curr++;
                        }
                    }
                    /* NOTE(review): curr ends equal to totalCorns (the
                       pre-filter count), not the number of surviving
                       inliers — looks like the intended value was the
                       inlier count; confirm against callers before
                       changing. */
                    resNumCorrPoints = curr;
                }
            }
        }
    }

    __END__;

    /* Free allocated memory */
    cvFree(&cornerPoints1);
    cvFree(&cornerPoints2);
    cvFree(&status);
    cvFree(&errors);
    cvFree(&tmpPoints1);
    cvFree(&tmpPoints2);
    cvReleaseMat( &pStatus );
    cvReleaseImage( &grayImage1 );
    cvReleaseImage( &grayImage2 );
    cvReleaseImage( &pyrImage1 );
    cvReleaseImage( &pyrImage2 );

    return resNumCorrPoints;
}
/* Loads the first (or the named) top-level object from a file storage.
 * memstorage receives dynamic structures (sequences/sets); _real_name, if
 * non-NULL, receives a cvAlloc'ed copy of the loaded node's name (caller
 * frees with cvFree).  Returns the loaded object pointer or 0. */
CV_IMPL void*
cvLoad( const char* filename, CvMemStorage* memstorage,
        const char* name, const char** _real_name )
{
    void* ptr = 0;
    const char* real_name = 0;
    cv::FileStorage fs(cvOpenFileStorage(filename, memstorage, CV_STORAGE_READ));

    CvFileNode* node = 0;

    if( !fs.isOpened() )
        return 0;

    if( name )
    {
        node = cvGetFileNodeByName( *fs, 0, name );
    }
    else
    {
        int i, k;
        /* no name given: scan the streams for the first named map entry */
        for( k = 0; k < (*fs)->roots->total; k++ )
        {
            CvSeq* seq;
            CvSeqReader reader;

            node = (CvFileNode*)cvGetSeqElem( (*fs)->roots, k );
            CV_Assert(node != NULL);
            if( !CV_NODE_IS_MAP( node->tag ))
                return 0;
            seq = node->data.seq;
            node = 0;

            cvStartReadSeq( seq, &reader, 0 );

            // find the first element in the map
            for( i = 0; i < seq->total; i++ )
            {
                if( CV_IS_SET_ELEM( reader.ptr ))
                {
                    node = (CvFileNode*)reader.ptr;
                    goto stop_search;
                }
                CV_NEXT_SEQ_ELEM( seq->elem_size, reader );
            }
        }

stop_search:
        ;
    }

    if( !node )
        CV_Error( CV_StsObjectNotFound, "Could not find the/an object in file storage" );

    real_name = cvGetFileNodeName( node );
    ptr = cvRead( *fs, node, 0 );

    // sanity check
    if( !memstorage && (CV_IS_SEQ( ptr ) || CV_IS_SET( ptr )) )
        CV_Error( CV_StsNullPtr, "NULL memory storage is passed - the loaded dynamic structure can not be stored" );

    if( cvGetErrStatus() < 0 )
    {
        cvRelease( (void**)&ptr );
        real_name = 0;
    }

    if( _real_name)
    {
        if (real_name)
        {
            /* FIX: the original allocated strlen(real_name) bytes and copied
               without a terminator, so *_real_name was not a valid C string
               and any strlen/printf on it read past the buffer.  Allocate
               one extra byte and NUL-terminate. */
            size_t len = strlen(real_name);
            char* name_copy = (char*)cvAlloc(len + 1);
            memcpy(name_copy, real_name, len);
            name_copy[len] = '\0';
            *_real_name = name_copy;
        }
        else
        {
            *_real_name = 0;
        }
    }

    return ptr;
}
bool CvCalibFilter::SetEtalon( CvCalibEtalonType type, double* params, int pointCount, CvPoint2D32f* _points ) { int i, arrSize; Stop(); if (latestPoints != NULL) { for( i = 0; i < MAX_CAMERAS; i++ ) cvFree( latestPoints + i ); } if( type == CV_CALIB_ETALON_USER || type != etalonType ) { if (etalonParams != NULL) { cvFree( &etalonParams ); } } etalonType = type; switch( etalonType ) { case CV_CALIB_ETALON_CHESSBOARD: etalonParamCount = 3; if( !params || cvRound(params[0]) != params[0] || params[0] < 3 || cvRound(params[1]) != params[1] || params[1] < 3 || params[2] <= 0 ) { assert(0); return false; } pointCount = cvRound((params[0] - 1)*(params[1] - 1)); break; case CV_CALIB_ETALON_USER: etalonParamCount = 0; if( !_points || pointCount < 4 ) { assert(0); return false; } break; default: assert(0); return false; } if( etalonParamCount > 0 ) { arrSize = etalonParamCount * sizeof(etalonParams[0]); etalonParams = (double*)cvAlloc( arrSize ); } arrSize = pointCount * sizeof(etalonPoints[0]); if( etalonPointCount != pointCount ) { if (etalonPoints != NULL) { cvFree( &etalonPoints ); } etalonPointCount = pointCount; etalonPoints = (CvPoint2D32f*)cvAlloc( arrSize ); } switch( etalonType ) { case CV_CALIB_ETALON_CHESSBOARD: { int etalonWidth = cvRound( params[0] ) - 1; int etalonHeight = cvRound( params[1] ) - 1; int x, y, k = 0; etalonParams[0] = etalonWidth; etalonParams[1] = etalonHeight; etalonParams[2] = params[2]; for( y = 0; y < etalonHeight; y++ ) for( x = 0; x < etalonWidth; x++ ) { etalonPoints[k++] = cvPoint2D32f( (etalonWidth - 1 - x)*params[2], y*params[2] ); } } break; case CV_CALIB_ETALON_USER: if (params != NULL) { memcpy( etalonParams, params, arrSize ); } if (_points != NULL) { memcpy( etalonPoints, _points, arrSize ); } break; default: assert(0); return false; } return true; }
/* Detects the etalon (currently only chessboard) in every camera view in
 * `mats`, refining corners to sub-pixel accuracy.  Stores the detected
 * points/counts in latestPoints/latestCounts (negative count = detection
 * failed).  Returns true only if the etalon was found in all views. */
bool CvCalibFilter::FindEtalon( CvMat** mats )
{
    bool result = true;

    if( !mats || etalonPointCount == 0 )
    {
        assert(0);
        result = false;
    }

    if( result )
    {
        int i, tempPointCount0 = etalonPointCount*2;

        for( i = 0; i < cameraCount; i++ )
        {
            if( !latestPoints[i] )
                /* FIX: allocate by element size sizeof(latestPoints[0][0])
                   (CvPoint2D32f), not sizeof(latestPoints[0]) which is the
                   size of a pointer — the two only coincide on LP64. */
                latestPoints[i] = (CvPoint2D32f*)
                    cvAlloc( tempPointCount0*2*sizeof(latestPoints[0][0]));
        }

        for( i = 0; i < cameraCount; i++ )
        {
            CvSize size;
            int tempPointCount = tempPointCount0;
            bool found = false;

            if( !CV_IS_MAT(mats[i]) && !CV_IS_IMAGE(mats[i]))
            {
                assert(0);
                break;
            }

            size = cvGetSize(mats[i]);

            if( size.width != imgSize.width || size.height != imgSize.height )
            {
                imgSize = size;
            }

            /* (re)allocate working buffers when the frame size changed */
            if( !grayImg || grayImg->width != imgSize.width ||
                grayImg->height != imgSize.height )
            {
                cvReleaseMat( &grayImg );
                cvReleaseMat( &tempImg );
                grayImg = cvCreateMat( imgSize.height, imgSize.width, CV_8UC1 );
                tempImg = cvCreateMat( imgSize.height, imgSize.width, CV_8UC1 );
            }

            if( !storage )
                storage = cvCreateMemStorage();

            switch( etalonType )
            {
            case CV_CALIB_ETALON_CHESSBOARD:
                if( CV_MAT_CN(cvGetElemType(mats[i])) == 1 )
                    cvCopy( mats[i], grayImg );
                else
                    cvCvtColor( mats[i], grayImg, CV_BGR2GRAY );

                found = cvFindChessBoardCornerGuesses( grayImg, tempImg, storage,
                            cvSize( cvRound(etalonParams[0]),
                                    cvRound(etalonParams[1])),
                            latestPoints[i], &tempPointCount ) != 0;
                if( found )
                    cvFindCornerSubPix( grayImg, latestPoints[i], tempPointCount,
                        cvSize(5,5), cvSize(-1,-1),
                        cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,10,0.1));
                break;
            default:
                assert(0);
                result = false;
                break;
            }

            /* negative count marks a failed detection but keeps the count */
            latestCounts[i] = found ? tempPointCount : -tempPointCount;
            result = result && found;
        }
    }

    if( storage )
        cvClearMemStorage( storage );

    return result;
}
/* Fills a connected component starting at seed_point with newVal.
 * Supports 8u/32f images with 1 or 3 channels, 4- or 8-connectivity,
 * fixed-range or floating-range (lo_diff/up_diff) fill, and an optional
 * external mask (maskarr).  `comp`, if non-NULL, receives the filled
 * component's statistics. */
CV_IMPL void
cvFloodFill( CvArr* arr, CvPoint seed_point,
             CvScalar newVal, CvScalar lo_diff, CvScalar up_diff,
             CvConnectedComp* comp, int flags, CvArr* maskarr )
{
    /* lazily-initialized dispatch tables: [0] = 8u, [1] = 32f variants */
    static void* ffill_tab[4];
    static void* ffillgrad_tab[4];
    static int inittab = 0;

    CvMat* tempMask = 0;
    CvFFillSegment* buffer = 0;

    CV_FUNCNAME( "cvFloodFill" );

    if( comp )
        memset( comp, 0, sizeof(*comp) );

    __BEGIN__;

    int i, type, depth, cn, is_simple, idx;
    int buffer_size, connectivity = flags & 255;  /* low byte carries connectivity */
    double nv_buf[4] = {0,0,0,0};
    /* thresholds are stored as raw bytes for 8u or floats for 32f */
    union { uchar b[4]; float f[4]; } ld_buf, ud_buf;
    CvMat stub, *img = (CvMat*)arr;
    CvMat maskstub, *mask = (CvMat*)maskarr;
    CvSize size;

    if( !inittab )
    {
        icvInitFloodFill( ffill_tab, ffillgrad_tab );
        inittab = 1;
    }

    CV_CALL( img = cvGetMat( img, &stub ));
    type = CV_MAT_TYPE( img->type );
    depth = CV_MAT_DEPTH(type);
    cn = CV_MAT_CN(type);

    idx = type == CV_8UC1 || type == CV_8UC3 ? 0 :
          type == CV_32FC1 || type == CV_32FC3 ? 1 : -1;

    if( idx < 0 )
        CV_ERROR( CV_StsUnsupportedFormat, "" );

    if( connectivity == 0 )
        connectivity = 4;
    else if( connectivity != 4 && connectivity != 8 )
        CV_ERROR( CV_StsBadFlag, "Connectivity must be 4, 0(=4) or 8" );

    /* "simple" fill = no mask requested and zero tolerances */
    is_simple = mask == 0 && (flags & CV_FLOODFILL_MASK_ONLY) == 0;

    for( i = 0; i < cn; i++ )
    {
        if( lo_diff.val[i] < 0 || up_diff.val[i] < 0 )
            CV_ERROR( CV_StsBadArg, "lo_diff and up_diff must be non-negative" );
        is_simple &= fabs(lo_diff.val[i]) < DBL_EPSILON && fabs(up_diff.val[i]) < DBL_EPSILON;
    }

    size = cvGetMatSize( img );

    /* unsigned compare also rejects negative coordinates */
    if( (unsigned)seed_point.x >= (unsigned)size.width ||
        (unsigned)seed_point.y >= (unsigned)size.height )
        CV_ERROR( CV_StsOutOfRange, "Seed point is outside of image" );

    cvScalarToRawData( &newVal, &nv_buf, type, 0 );
    /* scan-segment stack: worst case bounded by image perimeter-ish size */
    buffer_size = MAX( size.width, size.height )*2;
    CV_CALL( buffer = (CvFFillSegment*)cvAlloc( buffer_size*sizeof(buffer[0])));

    if( is_simple )
    {
        CvFloodFillFunc func = (CvFloodFillFunc)ffill_tab[idx];
        if( !func )
            CV_ERROR( CV_StsUnsupportedFormat, "" );
        IPPI_CALL( func( img->data.ptr, img->step, size,
                         seed_point, &nv_buf, comp, flags,
                         buffer, buffer_size, cn ));
    }
    else
    {
        CvFloodFillGradFunc func = (CvFloodFillGradFunc)ffillgrad_tab[idx];
        if( !func )
            CV_ERROR( CV_StsUnsupportedFormat, "" );

        if( !mask )
        {
            /* created mask will be 8-byte aligned */
            tempMask = cvCreateMat( size.height + 2, (size.width + 9) & -8, CV_8UC1 );
            mask = tempMask;
        }
        else
        {
            CV_CALL( mask = cvGetMat( mask, &maskstub ));
            if( !CV_IS_MASK_ARR( mask ))
                CV_ERROR( CV_StsBadMask, "" );

            if( mask->width != size.width + 2 || mask->height != size.height + 2 )
                CV_ERROR( CV_StsUnmatchedSizes, "mask must be 2 pixel wider "
                                                "and 2 pixel taller than filled image" );
        }

        {
            /* surround the mask with a 1-pixel non-zero border so the fill
               never walks off the image */
            int width = tempMask ? mask->step : size.width + 2;
            uchar* mask_row = mask->data.ptr + mask->step;
            memset( mask_row - mask->step, 1, width );

            for( i = 1; i <= size.height; i++, mask_row += mask->step )
            {
                if( tempMask )
                    memset( mask_row, 0, width );
                mask_row[0] = mask_row[size.width+1] = (uchar)1;
            }
            memset( mask_row, 1, width );
        }

        /* convert tolerances into the element type the kernel expects */
        if( depth == CV_8U )
            for( i = 0; i < cn; i++ )
            {
                int t = cvFloor(lo_diff.val[i]);
                ld_buf.b[i] = CV_CAST_8U(t);
                t = cvFloor(up_diff.val[i]);
                ud_buf.b[i] = CV_CAST_8U(t);
            }
        else
            for( i = 0; i < cn; i++ )
            {
                ld_buf.f[i] = (float)lo_diff.val[i];
                ud_buf.f[i] = (float)up_diff.val[i];
            }

        IPPI_CALL( func( img->data.ptr, img->step, mask->data.ptr, mask->step,
                         size, seed_point, &nv_buf, ld_buf.f, ud_buf.f,
                         comp, flags, buffer, buffer_size, cn ));
    }

    __END__;

    cvFree( &buffer );
    cvReleaseMat( &tempMask );
}
/* Segments a BGR image into roughly uniform regions: runs Canny to get an
 * edge mask, then flood-fills every unmarked pixel (tolerance
 * |ffill_threshold|) and repaints each region with its average color.
 * The result is written to dstarr (src is copied there first if distinct). */
CV_IMPL void
cvSegmentImage( const CvArr* srcarr, CvArr* dstarr,
                double canny_threshold, double ffill_threshold )
{
    CvMat* gray = 0;
    CvMat* canny = 0;
    void* stack = 0;

    CV_FUNCNAME( "cvSegmentImage" );

    __BEGIN__;

    CvMat srcstub, *src;
    CvMat dststub, *dst;
    CvMat* mask;
    CvSize size;
    CvPoint pt;
    int ffill_lw_up = cvRound( fabs(ffill_threshold) );

    CV_CALL( src = cvGetMat( srcarr, &srcstub ));
    CV_CALL( dst = cvGetMat( dstarr, &dststub ));

    /* operate in place on dst */
    if( src->data.ptr != dst->data.ptr )
    {
        CV_CALL( cvCopy( src, dst ));
        src = dst;
    }

    size = cvGetSize( src );

    CV_CALL( gray = cvCreateMat( size.height, size.width, CV_8UC1 ));
    CV_CALL( canny = cvCreateMat( size.height, size.width, CV_8UC1 ));
    /* worst-case flood-fill stack: one Seg per pixel */
    CV_CALL( stack = cvAlloc( size.width * size.height * sizeof(Seg)));

    cvCvtColor( src, gray, CV_BGR2GRAY );
    cvCanny( gray, canny, 0, canny_threshold, 5 );

    mask = canny; // a new name for new role

    // make a non-zero border.
    cvRectangle( mask, cvPoint(0,0), cvPoint(size.width-1,size.height-1), 1, 1 );

    for( pt.y = 0; pt.y < size.height; pt.y++ )
    {
        for( pt.x = 0; pt.x < size.width; pt.x++ )
        {
            /* unvisited, non-edge pixel: grow a region from here */
            if( mask->data.ptr[mask->step*pt.y + pt.x] == 0 )
            {
                CvConnectedComp region;
                int avgVal[3] = { 0, 0, 0 };

                /* FIX: restored "&region" — the source had the mojibake
                   "®ion" (a mangled "&reg;" entity), which does not
                   compile. */
                icvSegmFloodFill_Stage1( src->data.ptr, src->step,
                                         mask->data.ptr, mask->step,
                                         size, pt, avgVal,
                                         ffill_lw_up, ffill_lw_up,
                                         &region, stack );

                /* repaint the just-found region with its average color */
                icvSegmFloodFill_Stage2( src->data.ptr, src->step,
                                         mask->data.ptr, mask->step,
                                         size, avgVal,
                                         region.rect );
            }
        }
    }

    __END__;

    cvReleaseMat( &gray );
    cvReleaseMat( &canny );
    cvFree( &stack );
}
/* Initializes the contour scanner structure and prepares the 8uC1 image
   for scanning: clears a one-pixel border and binarizes all pixels to 0/1.
   Returns the scanner, or 0 on failure. */
CV_IMPL CvContourScanner
cvStartFindContours( void* _img, CvMemStorage* storage,
                     int header_size, int mode,
                     int method, CvPoint offset )
{
    int y;
    int step;
    CvSize size;
    uchar *img = 0;
    CvContourScanner scanner = 0;
    CvMat stub, *mat = (CvMat*)_img;

    CV_FUNCNAME( "cvStartFindContours" );

    __BEGIN__;

    if( !storage )
        CV_ERROR( CV_StsNullPtr, "" );

    CV_CALL( mat = cvGetMat( mat, &stub ));

    if( !CV_IS_MASK_ARR( mat ))
        CV_ERROR( CV_StsUnsupportedFormat, "[Start]FindContours support only 8uC1 images" );

    size = cvSize( mat->width, mat->height );
    step = mat->step;
    img = (uchar*)(mat->data.ptr);

    if( method < 0 || method > CV_CHAIN_APPROX_TC89_KCOS )
        CV_ERROR_FROM_STATUS( CV_BADRANGE_ERR );

    /* user header must be large enough for the sequence type produced */
    if( header_size < (int) (method == CV_CHAIN_CODE ? sizeof( CvChain ) : sizeof( CvContour )))
        CV_ERROR_FROM_STATUS( CV_BADSIZE_ERR );

    scanner = (CvContourScanner)cvAlloc( sizeof( *scanner ));
    if( !scanner )
        CV_ERROR_FROM_STATUS( CV_OUTOFMEM_ERR );

    memset( scanner, 0, sizeof( *scanner ));

    scanner->storage1 = scanner->storage2 = storage;
    scanner->img0 = (char *) img;
    scanner->img = (char *) (img + step);
    scanner->img_step = step;
    scanner->img_size.width = size.width - 1;   /* exclude rightest column */
    scanner->img_size.height = size.height - 1; /* exclude bottomost row */
    scanner->mode = mode;
    scanner->offset = offset;
    scanner->pt.x = scanner->pt.y = 1;
    scanner->lnbd.x = 0;
    scanner->lnbd.y = 1;
    scanner->nbd = 2;
    /* NOTE: a redundant duplicate "scanner->mode = (int) mode;" assignment
       present in the original was removed — mode is already stored above. */
    scanner->frame_info.contour = &(scanner->frame);
    scanner->frame_info.is_hole = 1;
    scanner->frame_info.next = 0;
    scanner->frame_info.parent = 0;
    scanner->frame_info.rect = cvRect( 0, 0, size.width, size.height );
    scanner->l_cinfo = 0;
    scanner->subst_flag = 0;

    scanner->frame.flags = CV_SEQ_FLAG_HOLE;

    scanner->approx_method2 = scanner->approx_method1 = method;

    /* TC89 methods first extract a chain code, then approximate it */
    if( method == CV_CHAIN_APPROX_TC89_L1 || method == CV_CHAIN_APPROX_TC89_KCOS )
        scanner->approx_method1 = CV_CHAIN_CODE;

    if( scanner->approx_method1 == CV_CHAIN_CODE )
    {
        scanner->seq_type1 = CV_SEQ_CHAIN_CONTOUR;
        scanner->header_size1 = scanner->approx_method1 == scanner->approx_method2 ?
            header_size : sizeof( CvChain );
        scanner->elem_size1 = sizeof( char );
    }
    else
    {
        scanner->seq_type1 = CV_SEQ_POLYGON;
        scanner->header_size1 = scanner->approx_method1 == scanner->approx_method2 ?
            header_size : sizeof( CvContour );
        scanner->elem_size1 = sizeof( CvPoint );
    }

    scanner->header_size2 = header_size;

    if( scanner->approx_method2 == CV_CHAIN_CODE )
    {
        scanner->seq_type2 = scanner->seq_type1;
        scanner->elem_size2 = scanner->elem_size1;
    }
    else
    {
        scanner->seq_type2 = CV_SEQ_POLYGON;
        scanner->elem_size2 = sizeof( CvPoint );
    }

    /* NOTE: the original re-assigned seq_type1/seq_type2 here with
       ternaries that compute exactly the values already set above; those
       dead stores were removed. */

    cvSaveMemStoragePos( storage, &(scanner->initial_pos) );

    if( method > CV_CHAIN_APPROX_SIMPLE )
    {
        scanner->storage1 = cvCreateChildMemStorage( scanner->storage2 );
    }

    if( mode > CV_RETR_LIST )
    {
        scanner->cinfo_storage = cvCreateChildMemStorage( scanner->storage2 );
        scanner->cinfo_set = cvCreateSet( 0, sizeof( CvSet ), sizeof( _CvContourInfo ),
                                          scanner->cinfo_storage );
        if( scanner->cinfo_storage == 0 || scanner->cinfo_set == 0 )
            CV_ERROR_FROM_STATUS( CV_OUTOFMEM_ERR );
    }

    /* make zero borders */
    memset( img, 0, size.width );
    memset( img + step * (size.height - 1), 0, size.width );

    for( y = 1, img += step; y < size.height - 1; y++, img += step )
    {
        img[0] = img[size.width - 1] = 0;
    }

    /* converts all pixels to 0 or 1 */
    cvThreshold( mat, mat, 0, 1, CV_THRESH_BINARY );

    CV_CHECK();

    __END__;

    if( cvGetErrStatus() < 0 )
        cvFree( (void **)&scanner );

    return scanner;
}
/* Dense-trajectory feature extraction: reads a video, tracks densely
 * sampled points with Farneback optical flow across a spatial pyramid,
 * and prints HOG/HOF/MBH descriptors for every trajectory that reaches
 * the configured length.  Usage: program <video> [options parsed by
 * arg_parse].  Reads many globals set by arg_parse (start_frame,
 * end_frame, scale_num, fscales, image buffers, ...). */
int main( int argc, char** argv )
{
    int frameNum = 0;
    TrackerInfo tracker;
    DescInfo hogInfo;
    DescInfo hofInfo;
    DescInfo mbhInfo;

    /* NOTE(review): argv[1] is read before arg_parse and without an argc
       check — running with no arguments dereferences a null/invalid entry. */
    char* video = argv[1];
    arg_parse(argc, argv);
    Video capture(video);

    // std::cerr << "start_frame: " << start_frame << " end_frame: " << end_frame << " track_length: " << track_length << std::endl;
    // std::cerr << "min_distance: " << min_distance << " patch_size: " << patch_size << " nxy_cell: " << nxy_cell << " nt_cell: " << nt_cell << std::endl;

    InitTrackerInfo(&tracker, track_length, init_gap);
    InitDescInfo(&hogInfo, 8, 0, 1, patch_size, nxy_cell, nt_cell);
    InitDescInfo(&hofInfo, 9, 1, 1, patch_size, nxy_cell, nt_cell);
    InitDescInfo(&mbhInfo, 8, 0, 1, patch_size, nxy_cell, nt_cell);

    if( show_track == 1 ){
        cvNamedWindow( "DenseTrack", 0 );
        cvNamedWindow("Original", 0);
    }

    /* one track list per pyramid level */
    std::vector<std::list<Track> > xyScaleTracks;
    int init_counter = 0; // indicate when to detect new feature points
    while( true ) {
        IplImageWrapper frame = 0;
        int i, j, c;

        // get a new frame
        frame = capture.getFrame();
        frameNum = capture.getFrameIndex();
        if( !frame ) {
            printf("break");
            break;
        }
        if( frameNum >= start_frame && frameNum <= end_frame ) {
            if( !image ) {
                // initailize all the buffers (first processed frame only)
                image = IplImageWrapper( cvGetSize(frame), 8, 3 );
                image->origin = frame->origin;
                prev_image= IplImageWrapper( cvGetSize(frame), 8, 3 );
                prev_image->origin = frame->origin;
                grey = IplImageWrapper( cvGetSize(frame), 8, 1 );
                grey_pyramid = IplImagePyramid( cvGetSize(frame), 8, 1, scale_stride );
                prev_grey = IplImageWrapper( cvGetSize(frame), 8, 1 );
                prev_grey_pyramid = IplImagePyramid( cvGetSize(frame), 8, 1, scale_stride );
                eig_pyramid = IplImagePyramid( cvGetSize(frame), 32, 1, scale_stride );

                cvCopy( frame, image, 0 );
                cvCvtColor( image, grey, CV_BGR2GRAY );
                grey_pyramid.rebuild( grey );

                // how many scale we can have
                scale_num = std::min<std::size_t>(scale_num, grey_pyramid.numOfLevels());
                fscales = (float*)cvAlloc(scale_num*sizeof(float));
                xyScaleTracks.resize(scale_num);

                for( int ixyScale = 0; ixyScale < scale_num; ++ixyScale ) {
                    std::list<Track>& tracks = xyScaleTracks[ixyScale];
                    fscales[ixyScale] = pow(scale_stride, ixyScale);

                    // find good features at each scale separately
                    IplImage *grey_temp = 0, *eig_temp = 0;
                    std::size_t temp_level = (std::size_t)ixyScale;
                    grey_temp = cvCloneImage(grey_pyramid.getImage(temp_level));
                    eig_temp = cvCloneImage(eig_pyramid.getImage(temp_level));
                    std::vector<CvPoint2D32f> points(0);
                    cvDenseSample(grey_temp, eig_temp, points, quality, min_distance);

                    // save the feature points (one new track per sample)
                    for( i = 0; i < points.size(); i++ ) {
                        Track track(tracker.trackLength);
                        PointDesc point(hogInfo, hofInfo, mbhInfo, points[i]);
                        track.addPointDesc(point);
                        tracks.push_back(track);
                    }
                    cvReleaseImage( &grey_temp );
                    cvReleaseImage( &eig_temp );
                }
            }

            // build the image pyramid for the current frame
            cvCopy( frame, image, 0 );
            cvCvtColor( image, grey, CV_BGR2GRAY );
            grey_pyramid.rebuild(grey);

            if( frameNum > 0 ) {
                init_counter++;
                for( int ixyScale = 0; ixyScale < scale_num; ++ixyScale ) {
                    // track feature points in each scale separately
                    std::vector<CvPoint2D32f> points_in(0);
                    std::list<Track>& tracks = xyScaleTracks[ixyScale];
                    for (std::list<Track>::iterator iTrack = tracks.begin(); iTrack != tracks.end(); ++iTrack) {
                        CvPoint2D32f point = iTrack->pointDescs.back().point;
                        points_in.push_back(point); // collect all the feature points
                    }
                    int count = points_in.size();
                    IplImage *prev_grey_temp = 0, *grey_temp = 0;
                    std::size_t temp_level = ixyScale;
                    prev_grey_temp = cvCloneImage(prev_grey_pyramid.getImage(temp_level));
                    grey_temp = cvCloneImage(grey_pyramid.getImage(temp_level));

                    cv::Mat prev_grey_mat = cv::cvarrToMat(prev_grey_temp);
                    cv::Mat grey_mat = cv::cvarrToMat(grey_temp);

                    std::vector<int> status(count);
                    std::vector<CvPoint2D32f> points_out(count);

                    // compute the optical flow
                    IplImage* flow = cvCreateImage(cvGetSize(grey_temp), IPL_DEPTH_32F, 2);
                    cv::Mat flow_mat = cv::cvarrToMat(flow);
                    cv::calcOpticalFlowFarneback( prev_grey_mat, grey_mat, flow_mat,
                        sqrt(2)/2.0, 5, 10, 2, 7, 1.5,
                        cv::OPTFLOW_FARNEBACK_GAUSSIAN );
                    // track feature points by median filtering (over the flow field)
                    OpticalFlowTracker(flow, points_in, points_out, status);

                    int width = grey_temp->width;
                    int height = grey_temp->height;
                    // compute the integral histograms
                    DescMat* hogMat = InitDescMat(height, width, hogInfo.nBins);
                    HogComp(prev_grey_temp, hogMat, hogInfo);

                    DescMat* hofMat = InitDescMat(height, width, hofInfo.nBins);
                    HofComp(flow, hofMat, hofInfo);

                    DescMat* mbhMatX = InitDescMat(height, width, mbhInfo.nBins);
                    DescMat* mbhMatY = InitDescMat(height, width, mbhInfo.nBins);
                    MbhComp(flow, mbhMatX, mbhMatY, mbhInfo);

                    /* i indexes status/points_in/points_out in lock-step with
                       the track iterator */
                    i = 0;
                    for (std::list<Track>::iterator iTrack = tracks.begin(); iTrack != tracks.end(); ++i) {
                        if( status[i] == 1 ) { // if the feature point is successfully tracked
                            PointDesc& pointDesc = iTrack->pointDescs.back();
                            CvPoint2D32f prev_point = points_in[i];
                            // get the descriptors for the feature point
                            CvScalar rect = getRect(prev_point, cvSize(width, height), hogInfo);
                            pointDesc.hog = getDesc(hogMat, rect, hogInfo);
                            pointDesc.hof = getDesc(hofMat, rect, hofInfo);
                            pointDesc.mbhX = getDesc(mbhMatX, rect, mbhInfo);
                            pointDesc.mbhY = getDesc(mbhMatY, rect, mbhInfo);

                            PointDesc point(hogInfo, hofInfo, mbhInfo, points_out[i]);
                            iTrack->addPointDesc(point);

                            // draw this track
                            if( show_track == 1 ) {
                                std::list<PointDesc>& descs = iTrack->pointDescs;
                                std::list<PointDesc>::iterator iDesc = descs.begin();
                                float length = descs.size();
                                CvPoint2D32f point0 = iDesc->point;
                                point0.x *= fscales[ixyScale]; // map the point to first scale
                                point0.y *= fscales[ixyScale];

                                /* this float j shadows the unused int j above */
                                float j = 0;
                                for (iDesc++; iDesc != descs.end(); ++iDesc, ++j) {
                                    CvPoint2D32f point1 = iDesc->point;
                                    point1.x *= fscales[ixyScale];
                                    point1.y *= fscales[ixyScale];

                                    cvLine(image, cvPointFrom32f(point0), cvPointFrom32f(point1),
                                        CV_RGB(0,cvFloor(255.0*(j+1.0)/length),0), 2, 8,0);
                                    point0 = point1;
                                }
                                cvCircle(image, cvPointFrom32f(point0), 2, CV_RGB(255,0,0), -1, 8,0);
                            }
                            ++iTrack;
                        }
                        else // remove the track, if we lose feature point
                            iTrack = tracks.erase(iTrack);
                    }
                    ReleDescMat(hogMat);
                    ReleDescMat(hofMat);
                    ReleDescMat(mbhMatX);
                    ReleDescMat(mbhMatY);
                    cvReleaseImage( &prev_grey_temp );
                    cvReleaseImage( &grey_temp );
                    cvReleaseImage( &flow );
                }

                for( int ixyScale = 0; ixyScale < scale_num; ++ixyScale ) {
                    std::list<Track>& tracks = xyScaleTracks[ixyScale]; // output the features for each scale
                    for( std::list<Track>::iterator iTrack = tracks.begin(); iTrack != tracks.end(); ) {
                        if( iTrack->pointDescs.size() >= tracker.trackLength+1 ) { // if the trajectory achieves the length we want
                            std::vector<CvPoint2D32f> trajectory(tracker.trackLength+1);
                            std::list<PointDesc>& descs = iTrack->pointDescs;
                            std::list<PointDesc>::iterator iDesc = descs.begin();

                            /* rescale the trajectory back to the original image */
                            for (int count = 0; count <= tracker.trackLength; ++iDesc, ++count) {
                                trajectory[count].x = iDesc->point.x*fscales[ixyScale];
                                trajectory[count].y = iDesc->point.y*fscales[ixyScale];
                            }
                            float mean_x(0), mean_y(0), var_x(0), var_y(0), length(0);
                            if( isValid(trajectory, mean_x, mean_y, var_x, var_y, length) == 1 ) {
                                /* one tab-separated feature line per trajectory:
                                   frame, mean, variance, length, scale, points,
                                   then HOG / HOF / MBHx / MBHy cell averages */
                                printf("%d\t", frameNum);
                                printf("%f\t%f\t", mean_x, mean_y);
                                printf("%f\t%f\t", var_x, var_y);
                                printf("%f\t", length);
                                printf("%f\t", fscales[ixyScale]);

                                for (int count = 0; count < tracker.trackLength; ++count)
                                    printf("%f\t%f\t", trajectory[count].x,trajectory[count].y );

                                iDesc = descs.begin();
                                int t_stride = cvFloor(tracker.trackLength/hogInfo.ntCells);
                                for( int n = 0; n < hogInfo.ntCells; n++ ) {
                                    std::vector<float> vec(hogInfo.dim);
                                    for( int t = 0; t < t_stride; t++, iDesc++ )
                                        for( int m = 0; m < hogInfo.dim; m++ )
                                            vec[m] += iDesc->hog[m];
                                    for( int m = 0; m < hogInfo.dim; m++ )
                                        printf("%f\t", vec[m]/float(t_stride));
                                }

                                iDesc = descs.begin();
                                t_stride = cvFloor(tracker.trackLength/hofInfo.ntCells);
                                for( int n = 0; n < hofInfo.ntCells; n++ ) {
                                    std::vector<float> vec(hofInfo.dim);
                                    for( int t = 0; t < t_stride; t++, iDesc++ )
                                        for( int m = 0; m < hofInfo.dim; m++ )
                                            vec[m] += iDesc->hof[m];
                                    for( int m = 0; m < hofInfo.dim; m++ )
                                        printf("%f\t", vec[m]/float(t_stride));
                                }

                                iDesc = descs.begin();
                                t_stride = cvFloor(tracker.trackLength/mbhInfo.ntCells);
                                for( int n = 0; n < mbhInfo.ntCells; n++ ) {
                                    std::vector<float> vec(mbhInfo.dim);
                                    for( int t = 0; t < t_stride; t++, iDesc++ )
                                        for( int m = 0; m < mbhInfo.dim; m++ )
                                            vec[m] += iDesc->mbhX[m];
                                    for( int m = 0; m < mbhInfo.dim; m++ )
                                        printf("%f\t", vec[m]/float(t_stride));
                                }

                                iDesc = descs.begin();
                                t_stride = cvFloor(tracker.trackLength/mbhInfo.ntCells);
                                for( int n = 0; n < mbhInfo.ntCells; n++ ) {
                                    std::vector<float> vec(mbhInfo.dim);
                                    for( int t = 0; t < t_stride; t++, iDesc++ )
                                        for( int m = 0; m < mbhInfo.dim; m++ )
                                            vec[m] += iDesc->mbhY[m];
                                    for( int m = 0; m < mbhInfo.dim; m++ )
                                        printf("%f\t", vec[m]/float(t_stride));
                                }
                                printf("\n");
                            }
                            iTrack = tracks.erase(iTrack);
                        }
                        else
                            iTrack++;
                    }
                }

                if( init_counter == tracker.initGap ) { // detect new feature points every initGap frames
                    init_counter = 0;
                    for (int ixyScale = 0; ixyScale < scale_num; ++ixyScale) {
                        std::list<Track>& tracks = xyScaleTracks[ixyScale];
                        std::vector<CvPoint2D32f> points_in(0);
                        std::vector<CvPoint2D32f> points_out(0);
                        /* NOTE(review): the i++ in this loop header looks like
                           a leftover — i is not used in the loop body */
                        for(std::list<Track>::iterator iTrack = tracks.begin(); iTrack != tracks.end(); iTrack++, i++) {
                            std::list<PointDesc>& descs = iTrack->pointDescs;
                            CvPoint2D32f point = descs.back().point; // the last point in the track
                            points_in.push_back(point);
                        }
                        IplImage *grey_temp = 0, *eig_temp = 0;
                        std::size_t temp_level = (std::size_t)ixyScale;
                        grey_temp = cvCloneImage(grey_pyramid.getImage(temp_level));
                        eig_temp = cvCloneImage(eig_pyramid.getImage(temp_level));

                        /* resample, avoiding locations already covered by points_in */
                        cvDenseSample(grey_temp, eig_temp, points_in, points_out, quality, min_distance);
                        // save the new feature points
                        for( i = 0; i < points_out.size(); i++) {
                            Track track(tracker.trackLength);
                            PointDesc point(hogInfo, hofInfo, mbhInfo, points_out[i]);
                            track.addPointDesc(point);
                            tracks.push_back(track);
                        }
                        cvReleaseImage( &grey_temp );
                        cvReleaseImage( &eig_temp );
                    }
                }
            }

            /* keep the current frame as "previous" for the next iteration */
            cvCopy( frame, prev_image, 0 );
            cvCvtColor( prev_image, prev_grey, CV_BGR2GRAY );
            prev_grey_pyramid.rebuild(prev_grey);
        }

        if( show_track == 1 ) {
            cvShowImage( "DenseTrack", image);
            cvShowImage("Original", frame);
            c = cvWaitKey(3);
            if((char)c == 27) break;   /* ESC quits */
        }

        // get the next frame
        if (!capture.nextFrame())
            break;
    }

    if( show_track == 1 )
        cvDestroyWindow("DenseTrack");

    return 0;
}
/* Allocates and initializes a CvKalman filter.
 * DP = state dimensionality, MP = measurement dimensionality,
 * CP = control dimensionality (CP < 0 means CP = DP; CP == 0 disables the
 * control matrix).  Transition and noise covariances start as identity;
 * state, measurement matrix, gain and posterior error covariance start as
 * zero.  error_cov_pre and gain contents are left for the caller / first
 * cvKalmanCorrect to fill in.  Release with cvReleaseKalman. */
CV_IMPL CvKalman*
cvCreateKalman( int DP, int MP, int CP )
{
    CvKalman *kalman = 0;

    if( DP <= 0 || MP <= 0 )
        CV_Error( CV_StsOutOfRange,
        "state and measurement vectors must have positive number of dimensions" );

    if( CP < 0 )
        CP = DP;

    /* allocating memory for the structure */
    kalman = (CvKalman *)cvAlloc( sizeof( CvKalman ));
    memset( kalman, 0, sizeof(*kalman));

    kalman->DP = DP;
    kalman->MP = MP;
    kalman->CP = CP;

    kalman->state_pre = cvCreateMat( DP, 1, CV_32FC1 );
    cvZero( kalman->state_pre );

    kalman->state_post = cvCreateMat( DP, 1, CV_32FC1 );
    cvZero( kalman->state_post );

    kalman->transition_matrix = cvCreateMat( DP, DP, CV_32FC1 );
    cvSetIdentity( kalman->transition_matrix );

    kalman->process_noise_cov = cvCreateMat( DP, DP, CV_32FC1 );
    cvSetIdentity( kalman->process_noise_cov );

    kalman->measurement_matrix = cvCreateMat( MP, DP, CV_32FC1 );
    cvZero( kalman->measurement_matrix );

    kalman->measurement_noise_cov = cvCreateMat( MP, MP, CV_32FC1 );
    cvSetIdentity( kalman->measurement_noise_cov );

    /* error_cov_pre is deliberately left uninitialized; it is overwritten
       by the predict step */
    kalman->error_cov_pre = cvCreateMat( DP, DP, CV_32FC1 );

    kalman->error_cov_post = cvCreateMat( DP, DP, CV_32FC1 );
    cvZero( kalman->error_cov_post );

    kalman->gain = cvCreateMat( DP, MP, CV_32FC1 );

    if( CP > 0 )
    {
        kalman->control_matrix = cvCreateMat( DP, CP, CV_32FC1 );
        cvZero( kalman->control_matrix );
    }

    /* scratch matrices used by cvKalmanPredict/cvKalmanCorrect */
    kalman->temp1 = cvCreateMat( DP, DP, CV_32FC1 );
    kalman->temp2 = cvCreateMat( MP, DP, CV_32FC1 );
    kalman->temp3 = cvCreateMat( MP, MP, CV_32FC1 );
    kalman->temp4 = cvCreateMat( MP, DP, CV_32FC1 );
    kalman->temp5 = cvCreateMat( MP, 1, CV_32FC1 );

#if 1
    /* Legacy (pre-1.0 API) aliases into the matrices above.
       NOTE(review): PosterState aliases state_PRE and PriorState aliases
       state_POST, which reads as swapped relative to the names — but
       changing the mapping would alter behavior for legacy callers;
       confirm against the legacy Kalman API before touching. */
    kalman->PosterState = kalman->state_pre->data.fl;
    kalman->PriorState = kalman->state_post->data.fl;
    kalman->DynamMatr = kalman->transition_matrix->data.fl;
    kalman->MeasurementMatr = kalman->measurement_matrix->data.fl;
    kalman->MNCovariance = kalman->measurement_noise_cov->data.fl;
    kalman->PNCovariance = kalman->process_noise_cov->data.fl;
    kalman->KalmGainMatr = kalman->gain->data.fl;
    kalman->PriorErrorCovariance = kalman->error_cov_pre->data.fl;
    kalman->PosterErrorCovariance = kalman->error_cov_post->data.fl;
#endif

    return kalman;
}
void pkmGaussianMixtureModel::modelData(int minComponents, int maxComponents, double regularizingFactor, double stoppingThreshold) { // indicator will contain the assignments of each data point to // the mixture components, as result of the E-step // double * indicator = new double[k * m_nObservations]; //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // // Use as an initial approxiamation, a diagonal covariance matrix // taken from the mean covariances // Could instead use K-Means (see opencv function, kmeans2) // // Alternatively, the algorithm may start with M-step when // initial values for pi,k can be provided. Another alternative, // when pi,k are unknown, is to use a simpler clustering algorithm // to pre-cluster the input samples and thus obtain initial pi,k. // Often (and in ML) k-means algorithm is used for that purpose. // // One of the main that EM algorithm should deal with is the large // number of parameters to estimate. The majority of the parameters // sits in covariation matrices, which are d×d elements each // (where d is the feature space dimensionality). However, in many // practical problems the covariation matrices are close to diagonal, // or even to μk*I, where I is identity matrix and μk is // mixture-dependent "scale" parameter. So a robust computation // scheme could be to start with the harder constraints on the // covariation matrices and then use the estimated parameters as an // input for a less constrained optimization problem (often a // diagonal covariation matrix is already a good enough approximation). // // References: // // 1. [Bilmes98] J. A. Bilmes. A Gentle Tutorial of the EM Algorithm // and its Application to Parameter Estimation for Gaussian Mixture // and Hidden Markov Models. Technical Report TR-97-021, // International Computer Science Institute and Computer Science // Division, University of California at Berkeley, April 1998. 
//// This code is for indexing (observations x variables) emModel = new CvEM[maxComponents-minComponents+1]; //////////////////////////////////////////////////////////// // EM int i; double minBIC = HUGE_VAL; if(maxComponents >= m_nObservations) { maxComponents = m_nObservations-1; } if(minComponents > maxComponents) { minComponents = maxComponents = m_nObservations-1; } for (int k = minComponents; k <= maxComponents; k++) { #if 0 ////////////////////////////////////////////////////////////// // Create a list of random indexes from 1 : K // from the permutations of the number of observations int * randIndex = new int[m_nObservations]; // 1:N for (i = 0; i < m_nObservations; i++) randIndex[i] = i; // Shuffle the array for (i = 0; i < (m_nObservations-1); i++) { // Random position int r = i + (rand() % (m_nObservations-i)); // Swap int temp = randIndex[i]; randIndex[i] = randIndex[r]; randIndex[r] = temp; } ////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // Random initial kernels float * estMU = new float[k*m_nVariables]; for( int row = 0; row < k; row++ ) { int ind = randIndex[row]; for( int col = 0; col < m_nVariables; col++ ) { // Get each variable at index ind (of the random kernels) // from the input data into estMu estMU[row*m_nVariables+col] = ((float*)(m_pCvData->data.ptr + m_pCvData->step*ind))[col]; } } CvMat param_mean; cvInitMatHeader(¶m_mean, k, m_nVariables, CV_32FC1, estMU); //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // Calculate the Covariance matrix (assume this is a 2x2 Matrix) CvMat *m_pCvCov = cvCreateMat(m_nVariables, m_nVariables, CV_32FC1); CvMat *m_pCvMu = cvCreateMat(m_nVariables, 1, CV_32FC1); CvMat **dat = (CvMat**)cvAlloc( m_nObservations * sizeof(*dat) ); for (i = 0; i < m_nObservations; i++) { CvMat *tempData = cvCreateMat(m_nVariables, 1, CV_32FC1); CV_MAT_ELEM(*tempData, float, 0, 0) 
= CV_MAT_ELEM(*m_pCvData, float, i, 0); CV_MAT_ELEM(*tempData, float, 1, 0) = CV_MAT_ELEM(*m_pCvData, float, i, 1); dat[i] = tempData; } cvCalcCovarMatrix((const CvArr**)dat, m_nObservations, m_pCvCov, m_pCvMu, CV_COVAR_NORMAL); //|CV_COVAR_SCALE); // Store k (all axes) Matrices of Diagonal Covariance Matrices // initialized to 1/10th of the max of the diag values // of the mean variance as the estimated covariances CvMat **param_cov = (CvMat**)cvAlloc( k * sizeof(*param_cov) ); float covMax = MAX(CV_MAT_ELEM(*m_pCvCov, float, 0, 0), CV_MAT_ELEM(*m_pCvCov, float, 1, 1)) / 10.; for (int kern = 0; kern < k; kern++) { CvMat *tempData = cvCreateMat(m_nVariables, m_nVariables, CV_32FC1); CV_MAT_ELEM(*tempData, float, 0, 0) = covMax; CV_MAT_ELEM(*tempData, float, 0, 1) = 0.0f; CV_MAT_ELEM(*tempData, float, 1, 0) = 0.0f; CV_MAT_ELEM(*tempData, float, 1, 1) = covMax; param_cov[kern] = tempData; } //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // Random mixing probabilities for each kernel float * estPP = new float[k]; for (i = 0; i < k; i++) { estPP[i] = 1.0/(float)k; } // Weights for each kernel CvMat param_weight; cvInitMatHeader(¶m_weight, k, 1, CV_32FC1, estPP); //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// float *estProb = new float[k*m_nObservations]; for (i = 0; i < k; i++) { for(int j = 0; j < m_nObservations; j++) { estProb[i*j] = estPP[i] / 2.0; } } // Create a Cv Matrix for the mix prob CvMat param_prob; cvInitMatHeader(¶m_prob, m_nObservations, k, CV_32FC1, estProb); //////////////////////////////////////////////////////////// // Initialize parameters CvEMParams emParam; emParam.covs = (const CvMat **)param_cov; emParam.means = ¶m_mean; emParam.weights = ¶m_weight; emParam.probs = NULL;//¶m_prob; emParam.nclusters = k+1; emParam.cov_mat_type = 
CvEM::COV_MAT_GENERIC;//CvEM::COV_MAT_DIAGONAL;////CvEM::COV_MAT_SPHERICAL; emParam.start_step = CvEM::START_E_STEP; //CvEM::START_AUTO_STEP; // initialize with k-means emParam.term_crit.epsilon = 0.00001; emParam.term_crit.max_iter = 50; emParam.term_crit.type = CV_TERMCRIT_ITER | CV_TERMCRIT_EPS; // Train emModel[k-minComponents].train(m_pCvData, 0, emParam, 0); double thisLikelihood = emModel[k-minComponents].get_log_likelihood(); //double BIC = -2.*thisLikelihood - (double)k*log((double)m_nObservations*10); double BIC = -m_nObservations*thisLikelihood + k/2.*log((double)m_nObservations); printf("K: %d, BIC: %f\n", k, BIC); if (BIC < minLikelihood) { bestModel = k-minComponents; minLikelihood = BIC; } delete [] randIndex; delete [] estMU; delete [] estPP; #else CvEMParams emParam; emParam.covs = NULL; emParam.means = NULL; emParam.weights = NULL; emParam.probs = NULL; emParam.nclusters = k; emParam.cov_mat_type = m_covType;//CvEM::COV_MAT_SPHERICAL;//CvEM::COV_MAT_DIAGONAL;////CvEM::COV_MAT_GENERIC;//; emParam.start_step = CvEM::START_AUTO_STEP; //CvEM::START_AUTO_STEP; // initialize with k-means emParam.term_crit.epsilon = 0.01; emParam.term_crit.max_iter = 100; emParam.term_crit.type = CV_TERMCRIT_ITER | CV_TERMCRIT_EPS; // Train emModel[k-minComponents].train(m_pCvData, 0, emParam, 0); // Calculate the log likelihood of the model const CvMat *weights = emModel[k-minComponents].get_weights(); const CvMat *probs = emModel[k-minComponents].get_probs(); const CvMat **modelCovs = emModel[k-minComponents].get_covs(); const CvMat *modelMus = emModel[k-minComponents].get_means(); const CvMat *modelWeights = emModel[k-minComponents].get_weights(); double thisLikelihood; if(k == 1) // mlem.cpp does not calculate the log_likelihood for 1 cluster // (why i have no idea?! it sets log_likelihood = DBL_MAX/1000.;!?) // so i compute it here. though this seems to pair up with the // same value you get for 2 kernels, it does not pair up for // anything higher? 
{ double _log_likelihood = 0;//-CV_LOG2PI * (double)m_nObservations * (double)m_nVariables / 2.; CvMat *pts = cvCreateMat(m_nVariables, 1, CV_64FC1); CvMat *mean = cvCreateMat(m_nVariables, 1, CV_64FC1); for( int n = 0; n < m_nObservations; n++ ) { double sum = 0; cvmSet(pts, 0, 0, cvmGet(m_pCvData, n, 0)); cvmSet(pts, 1, 0, cvmGet(m_pCvData, n, 1)); double* pp = (double*)(probs->data.ptr + probs->step*n); for( int d = 0; d < k; d++ ) { const CvMat * covar = modelCovs[d]; cvmSet(mean, 0, 0, cvmGet(modelMus, d, 0)); cvmSet(mean, 1, 0, cvmGet(modelMus, d, 1)); double p_x = multinormalDistribution(pts, mean, covar); double w_k = cvmGet(weights, 0, d); sum += p_x * w_k;// * pp[d]; //printf("%f + %f += %f\n", p_x, w_k, sum); } _log_likelihood -= log(sum); } thisLikelihood = -_log_likelihood;//emModel[k-minComponents].get_log_likelihood(); } else { thisLikelihood = emModel[k-minComponents].get_log_likelihood(); } // Calculate the Bit Information Criterion for Model Selection double vars = (double)m_nVariables; double N_p = ((double)k-1.)+(double)k*(vars + vars*(vars+1.)/2.); double BIC = -2.*thisLikelihood + N_p*log((double)m_nObservations); //printf("K: %d, like: %f, BIC: %f\n", k, thisLikelihood, BIC); if (BIC < minBIC) { // update variables with the best bic and best model subscript bestModel = k-minComponents; minBIC = BIC; // store the bic and likelihood for printing later m_BIC = BIC; m_Likelihood = thisLikelihood; } #endif } bModeled = true; // m_pCvProb = emModel.get_probs; }
/* Shared implementation behind cvErode (mop == 0) and cvDilate (mop != 0).
 *
 * srcarr/dstarr - input and output arrays (CvMat or IplImage); may alias
 *                 each other, in which case the op runs in place via a
 *                 temporary clone.
 * element       - structuring element; NULL means a 3x3 rectangle anchored
 *                 at (1,1).
 * iterations    - number of times to apply the operation; 0 (or a 1x1
 *                 element) degenerates to a plain copy.
 * mop           - 0 = erode, otherwise dilate.
 *
 * Tries two IPP-accelerated fast paths (rectangular element, then custom
 * element) before falling back to the generic CvMorphology engine.
 */
static void
icvMorphOp( const void* srcarr, void* dstarr, IplConvKernel* element,
            int iterations, int mop )
{
    CvMorphology morphology;
    void* buffer = 0;       /* IPP work buffer (stack- or heap-allocated) */
    int local_alloc = 0;    /* nonzero => `buffer` came from cvStackAlloc */
    void* morphstate = 0;   /* IPP state object for the custom-element path */
    CvMat* temp = 0;        /* temporary clone used for in-place/iterated ops */

    CV_FUNCNAME( "icvMorphOp" );

    __BEGIN__;

    int i, coi1 = 0, coi2 = 0;
    CvMat srcstub, *src = (CvMat*)srcarr;
    CvMat dststub, *dst = (CvMat*)dstarr;
    CvMat el_hdr, *el = 0;
    CvSize size, el_size;
    CvPoint el_anchor;
    int el_shape;
    int type;
    bool inplace;

    /* Normalize src to a CvMat header; copy it into a local stub so the
       header can be modified (e.g. src = temp) without touching the
       caller's matrix. */
    if( !CV_IS_MAT(src) )
        CV_CALL( src = cvGetMat( src, &srcstub, &coi1 ));

    if( src != &srcstub )
    {
        srcstub = *src;
        src = &srcstub;
    }

    if( dstarr == srcarr )
        dst = src;
    else
    {
        CV_CALL( dst = cvGetMat( dst, &dststub, &coi2 ));

        if( !CV_ARE_TYPES_EQ( src, dst ))
            CV_ERROR( CV_StsUnmatchedFormats, "" );

        if( !CV_ARE_SIZES_EQ( src, dst ))
            CV_ERROR( CV_StsUnmatchedSizes, "" );
    }

    if( dst != &dststub )
    {
        dststub = *dst;
        dst = &dststub;
    }

    /* COI (channel-of-interest) processing is not supported here. */
    if( coi1 != 0 || coi2 != 0 )
        CV_ERROR( CV_BadCOI, "" );

    type = CV_MAT_TYPE( src->type );
    size = cvGetMatSize( src );
    inplace = src->data.ptr == dst->data.ptr;

    /* Degenerate cases: nothing to do beyond a copy. */
    if( iterations == 0 || (element && element->nCols == 1 && element->nRows == 1))
    {
        if( src->data.ptr != dst->data.ptr )
            cvCopy( src, dst );
        EXIT;
    }

    if( element )
    {
        el_size = cvSize( element->nCols, element->nRows );
        el_anchor = cvPoint( element->anchorX, element->anchorY );
        /* nShiftR doubles as the shape code in the legacy IplConvKernel;
           anything >= CV_SHAPE_CUSTOM is clamped to CUSTOM. */
        el_shape = (int)(element->nShiftR);
        el_shape = el_shape < CV_SHAPE_CUSTOM ? el_shape : CV_SHAPE_CUSTOM;
    }
    else
    {
        /* Default structuring element: 3x3 rectangle, centered anchor. */
        el_size = cvSize(3,3);
        el_anchor = cvPoint(1,1);
        el_shape = CV_SHAPE_RECT;
    }

    /* n iterations of a rectangle == 1 iteration of a proportionally
       larger rectangle, so collapse the loop up-front. */
    if( el_shape == CV_SHAPE_RECT && iterations > 1 )
    {
        el_size.width = 1 + (el_size.width-1)*iterations;
        el_size.height = 1 + (el_size.height-1)*iterations;
        el_anchor.x *= iterations;
        el_anchor.y *= iterations;
        iterations = 1;
    }

    /* ---- IPP fast path #1: rectangular structuring element ---- */
    if( el_shape == CV_SHAPE_RECT && icvErodeRect_GetBufSize_8u_C1R_p )
    {
        CvMorphRectFunc_IPP rect_func = 0;
        CvMorphRectGetBufSizeFunc_IPP rect_getbufsize_func = 0;

        /* Select the IPP function pair for this type/channel count. */
        if( mop == 0 )
        {
            if( type == CV_8UC1 )
                rect_getbufsize_func = icvErodeRect_GetBufSize_8u_C1R_p,
                rect_func = icvErodeRect_8u_C1R_p;
            else if( type == CV_8UC3 )
                rect_getbufsize_func = icvErodeRect_GetBufSize_8u_C3R_p,
                rect_func = icvErodeRect_8u_C3R_p;
            else if( type == CV_8UC4 )
                rect_getbufsize_func = icvErodeRect_GetBufSize_8u_C4R_p,
                rect_func = icvErodeRect_8u_C4R_p;
            else if( type == CV_16UC1 )
                rect_getbufsize_func = icvErodeRect_GetBufSize_16u_C1R_p,
                rect_func = icvErodeRect_16u_C1R_p;
            else if( type == CV_16UC3 )
                rect_getbufsize_func = icvErodeRect_GetBufSize_16u_C3R_p,
                rect_func = icvErodeRect_16u_C3R_p;
            else if( type == CV_16UC4 )
                rect_getbufsize_func = icvErodeRect_GetBufSize_16u_C4R_p,
                rect_func = icvErodeRect_16u_C4R_p;
            else if( type == CV_32FC1 )
                rect_getbufsize_func = icvErodeRect_GetBufSize_32f_C1R_p,
                rect_func = icvErodeRect_32f_C1R_p;
            else if( type == CV_32FC3 )
                rect_getbufsize_func = icvErodeRect_GetBufSize_32f_C3R_p,
                rect_func = icvErodeRect_32f_C3R_p;
            else if( type == CV_32FC4 )
                rect_getbufsize_func = icvErodeRect_GetBufSize_32f_C4R_p,
                rect_func = icvErodeRect_32f_C4R_p;
        }
        else
        {
            if( type == CV_8UC1 )
                rect_getbufsize_func = icvDilateRect_GetBufSize_8u_C1R_p,
                rect_func = icvDilateRect_8u_C1R_p;
            else if( type == CV_8UC3 )
                rect_getbufsize_func = icvDilateRect_GetBufSize_8u_C3R_p,
                rect_func = icvDilateRect_8u_C3R_p;
            else if( type == CV_8UC4 )
                rect_getbufsize_func = icvDilateRect_GetBufSize_8u_C4R_p,
                rect_func = icvDilateRect_8u_C4R_p;
            else if( type == CV_16UC1 )
                rect_getbufsize_func = icvDilateRect_GetBufSize_16u_C1R_p,
                rect_func = icvDilateRect_16u_C1R_p;
            else if( type == CV_16UC3 )
                rect_getbufsize_func = icvDilateRect_GetBufSize_16u_C3R_p,
                rect_func = icvDilateRect_16u_C3R_p;
            else if( type == CV_16UC4 )
                rect_getbufsize_func = icvDilateRect_GetBufSize_16u_C4R_p,
                rect_func = icvDilateRect_16u_C4R_p;
            else if( type == CV_32FC1 )
                rect_getbufsize_func = icvDilateRect_GetBufSize_32f_C1R_p,
                rect_func = icvDilateRect_32f_C1R_p;
            else if( type == CV_32FC3 )
                rect_getbufsize_func = icvDilateRect_GetBufSize_32f_C3R_p,
                rect_func = icvDilateRect_32f_C3R_p;
            else if( type == CV_32FC4 )
                rect_getbufsize_func = icvDilateRect_GetBufSize_32f_C4R_p,
                rect_func = icvDilateRect_32f_C4R_p;
        }

        if( rect_getbufsize_func && rect_func )
        {
            int bufsize = 0;

            CvStatus status = rect_getbufsize_func( size.width, el_size, &bufsize );
            if( status >= 0 && bufsize > 0 )
            {
                /* Small buffers come from the stack, big ones from the heap;
                   local_alloc tells the cleanup code which one it was. */
                if( bufsize < CV_MAX_LOCAL_SIZE )
                {
                    buffer = cvStackAlloc( bufsize );
                    local_alloc = 1;
                }
                else
                    CV_CALL( buffer = cvAlloc( bufsize ));
            }

            if( status >= 0 )
            {
                int src_step, dst_step = dst->step ? dst->step : CV_STUB_STEP;

                /* IPP cannot run in place: clone dst and read from the clone. */
                if( inplace )
                {
                    CV_CALL( temp = cvCloneMat( dst ));
                    src = temp;
                }
                src_step = src->step ? src->step : CV_STUB_STEP;
                status = rect_func( src->data.ptr, src_step, dst->data.ptr,
                                    dst_step, size, el_size, el_anchor, buffer );
            }

            /* Success => done; failure falls through to the generic path. */
            if( status >= 0 )
                EXIT;
        }
    }
    /* ---- IPP fast path #2: custom structuring element (out-of-place only) ---- */
    else if( el_shape == CV_SHAPE_CUSTOM && icvMorphInitAlloc_8u_C1R_p && icvMorphFree_p &&
             src->data.ptr != dst->data.ptr )
    {
        CvMorphCustomFunc_IPP custom_func = 0;
        CvMorphCustomInitAllocFunc_IPP custom_initalloc_func = 0;
        const int bordertype = 1; // replication border

        if( type == CV_8UC1 )
            custom_initalloc_func = icvMorphInitAlloc_8u_C1R_p,
            custom_func = mop == 0 ? icvErode_8u_C1R_p : icvDilate_8u_C1R_p;
        else if( type == CV_8UC3 )
            custom_initalloc_func = icvMorphInitAlloc_8u_C3R_p,
            custom_func = mop == 0 ? icvErode_8u_C3R_p : icvDilate_8u_C3R_p;
        else if( type == CV_8UC4 )
            custom_initalloc_func = icvMorphInitAlloc_8u_C4R_p,
            custom_func = mop == 0 ? icvErode_8u_C4R_p : icvDilate_8u_C4R_p;
        else if( type == CV_16UC1 )
            custom_initalloc_func = icvMorphInitAlloc_16u_C1R_p,
            custom_func = mop == 0 ? icvErode_16u_C1R_p : icvDilate_16u_C1R_p;
        else if( type == CV_16UC3 )
            custom_initalloc_func = icvMorphInitAlloc_16u_C3R_p,
            custom_func = mop == 0 ? icvErode_16u_C3R_p : icvDilate_16u_C3R_p;
        else if( type == CV_16UC4 )
            custom_initalloc_func = icvMorphInitAlloc_16u_C4R_p,
            custom_func = mop == 0 ? icvErode_16u_C4R_p : icvDilate_16u_C4R_p;
        else if( type == CV_32FC1 )
            custom_initalloc_func = icvMorphInitAlloc_32f_C1R_p,
            custom_func = mop == 0 ? icvErode_32f_C1R_p : icvDilate_32f_C1R_p;
        else if( type == CV_32FC3 )
            custom_initalloc_func = icvMorphInitAlloc_32f_C3R_p,
            custom_func = mop == 0 ? icvErode_32f_C3R_p : icvDilate_32f_C3R_p;
        else if( type == CV_32FC4 )
            custom_initalloc_func = icvMorphInitAlloc_32f_C4R_p,
            custom_func = mop == 0 ? icvErode_32f_C4R_p : icvDilate_32f_C4R_p;

        if( custom_initalloc_func && custom_func )
        {
            uchar *src_ptr, *dst_ptr = dst->data.ptr;
            int src_step, dst_step = dst->step ? dst->step : CV_STUB_STEP;
            int el_len = el_size.width*el_size.height;
            /* IPP wants the element as a 0/1 byte mask. */
            uchar* el_mask = (uchar*)cvStackAlloc( el_len );
            CvStatus status;

            for( i = 0; i < el_len; i++ )
                el_mask[i] = (uchar)(element->values[i] != 0);

            status = custom_initalloc_func( size.width, el_mask, el_size,
                                            el_anchor, &morphstate );

            /* Multiple iterations ping-pong between two buffers, so clone
               src as the second buffer. */
            if( status >= 0 && (inplace || iterations > 1) )
            {
                CV_CALL( temp = cvCloneMat( src ));
                src = temp;
            }

            src_ptr = src->data.ptr;
            src_step = src->step ? src->step : CV_STUB_STEP;

            for( i = 0; i < iterations && status >= 0 && morphstate; i++ )
            {
                uchar* t_ptr;
                int t_step;
                status = custom_func( src_ptr, src_step, dst_ptr, dst_step,
                                      size, bordertype, morphstate );
                /* Swap the roles of source and destination for the next pass. */
                CV_SWAP( src_ptr, dst_ptr, t_ptr );
                CV_SWAP( src_step, dst_step, t_step );
                /* After the first pass, subsequent passes must write into
                   the clone rather than back into the original src. */
                if( i == 0 && temp )
                {
                    dst_ptr = temp->data.ptr;
                    dst_step = temp->step ? temp->step : CV_STUB_STEP;
                }
            }

            if( status >= 0 )
            {
                /* An even iteration count leaves the result in `temp`. */
                if( iterations % 2 == 0 )
                    cvCopy( temp, dst );
                EXIT;
            }
        }
    }

    /* ---- Generic fallback: CvMorphology engine ---- */
    if( el_shape != CV_SHAPE_RECT )
    {
        /* Wrap the element values in a matrix header and treat the shape
           as fully custom. */
        el_hdr = cvMat( element->nRows, element->nCols, CV_32SC1, element->values );
        el = &el_hdr;
        el_shape = CV_SHAPE_CUSTOM;
    }

    CV_CALL( morphology.init( mop, src->cols, src->type, el_shape,
                              el, el_size, el_anchor ));

    for( i = 0; i < iterations; i++ )
    {
        CV_CALL( morphology.process( src, dst ));
        src = dst;  /* subsequent iterations operate on the result in place */
    }

    __END__;

    /* Cleanup: heap buffer, IPP state, and the temporary clone (if any). */
    if( !local_alloc )
        cvFree( &buffer );

    if( morphstate )
        icvMorphFree_p( morphstate );

    cvReleaseMat( &temp );
}
/* Creates a named HighGUI window (GTK backend) or returns success if a
 * window with that name already exists.
 *
 * name  - window identifier; must be non-NULL.
 * flags - CV_WINDOW_AUTOSIZE makes the window non-resizable and sized to
 *         its image; otherwise the window is user-resizable.
 *
 * Returns 1 on success (including "already exists"), 0 on failure.
 * The new window is linked at the head of the global hg_windows list.
 */
CV_IMPL int cvNamedWindow( const char* name, int flags )
{
    int result = 0;
    CV_FUNCNAME( "cvNamedWindow" );

    __BEGIN__;

    CvWindow* window;
    int len;

    /* Lazily initializes GTK; the name is passed as a fake argv. */
    cvInitSystem(1,(char**)&name);
    if( !name )
        CV_ERROR( CV_StsNullPtr, "NULL name string" );

    // Check the name in the storage
    if( icvFindWindowByName( name ) != 0 )
    {
        result = 1;
        EXIT;
    }

    /* One allocation holds the struct plus the name string right after it. */
    len = strlen(name);
    CV_CALL( window = (CvWindow*)cvAlloc(sizeof(CvWindow) + len + 1));
    memset( window, 0, sizeof(*window));
    window->name = (char*)(window + 1);
    memcpy( window->name, name, len + 1 );
    window->flags = flags;
    window->signature = CV_WINDOW_MAGIC_VAL;
    window->last_key = 0;
    window->on_mouse = 0;
    window->on_mouse_param = 0;
    memset( &window->toolbar, 0, sizeof(window->toolbar));
    window->next = hg_windows;
    window->prev = 0;

    /* All GTK widget construction happens under the global HighGUI mutex. */
    CV_LOCK_MUTEX();

    /* frame > paned (vbox) > image widget; trackbars are packed into the
       same vbox later by cvCreateTrackbar. */
    window->frame = gtk_window_new( GTK_WINDOW_TOPLEVEL );

    window->paned = gtk_vbox_new( FALSE, 0 );
    window->widget = cvImageWidgetNew( flags );
    gtk_box_pack_end( GTK_BOX(window->paned), window->widget, TRUE, TRUE, 0 );
    gtk_widget_show( window->widget );
    gtk_container_add( GTK_CONTAINER(window->frame), window->paned );
    gtk_widget_show( window->paned );

    //
    // configure event handlers
    // TODO -- move this to CvImageWidget ?
    gtk_signal_connect( GTK_OBJECT(window->frame), "key-press-event",
                        GTK_SIGNAL_FUNC(icvOnKeyPress), window );
    gtk_signal_connect( GTK_OBJECT(window->widget), "button-press-event",
                        GTK_SIGNAL_FUNC(icvOnMouse), window );
    gtk_signal_connect( GTK_OBJECT(window->widget), "button-release-event",
                        GTK_SIGNAL_FUNC(icvOnMouse), window );
    gtk_signal_connect( GTK_OBJECT(window->widget), "motion-notify-event",
                        GTK_SIGNAL_FUNC(icvOnMouse), window );
    gtk_signal_connect( GTK_OBJECT(window->frame), "delete-event",
                        GTK_SIGNAL_FUNC(icvOnClose), window );

    /* Opt the image widget in to the mouse events connected above. */
    gtk_widget_add_events (window->widget,
                           GDK_EXPOSURE_MASK | GDK_BUTTON_RELEASE_MASK |
                           GDK_BUTTON_PRESS_MASK | GDK_POINTER_MOTION_MASK) ;

    gtk_widget_show( window->frame );
    gtk_window_set_title( GTK_WINDOW(window->frame), name );

    /* Link at the head of the global window list. */
    if( hg_windows )
        hg_windows->prev = window;
    hg_windows = window;

    gtk_window_set_resizable( GTK_WINDOW(window->frame), (flags & CV_WINDOW_AUTOSIZE) == 0 );

    // allow window to be resized
    if( (flags & CV_WINDOW_AUTOSIZE)==0 ){
        GdkGeometry geometry;
        geometry.min_width = 50;
        geometry.min_height = 50;
        gtk_window_set_geometry_hints( GTK_WINDOW( window->frame ),
                                       GTK_WIDGET( window->widget ),
                                       &geometry, (GdkWindowHints) (GDK_HINT_MIN_SIZE));
    }

    CV_UNLOCK_MUTEX();

    result = 1;
    __END__;

    return result;
}
/* Computes the area of a contour sector: the region bounded by the contour
 * points in [slice.start_index, slice.end_index] and the chord joining the
 * slice's first and last points.  The polyline is split into sub-areas at
 * every crossing of the chord line; the result is the sum of their absolute
 * areas (shoelace formula).  Returns 0 for slices of 2 points or fewer.
 */
static double
icvContourSecArea( CvSeq * contour, CvSlice slice )
{
    CvPoint pt;                 /* pointer to points */
    CvPoint pt_s, pt_e;         /* first and last points */
    CvSeqReader reader;         /* points reader of contour */
    int p_max = 2, p_ind;       /* capacity / count of partial areas */
    int lpt, flag, i;           /* flag: 0 until the first point is consumed */
    double a00;                 /* unnormalized moments m00 */
    double xi, yi, xi_1, yi_1, x0, y0, dxy, sk, sk1, t;
    double x_s, y_s, nx, ny, dx, dy, du, dv;
    double eps = 1.e-5;         /* tolerance for "on the chord line" tests */
    double *p_are1, *p_are2, *p_are;  /* growable array of partial areas */
    double area = 0;

    CV_Assert( contour != NULL && CV_IS_SEQ_POINT_SET( contour ));

    lpt = cvSliceLength( slice, contour );
    /*if( n2 >= n1 )
        lpt = n2 - n1 + 1;
    else
        lpt = contour->total - n1 + n2 + 1;*/

    /* Fewer than 3 points cannot enclose any area. */
    if( contour->total <= 0 || lpt <= 2 )
        return 0.;

    a00 = x0 = y0 = xi_1 = yi_1 = 0;
    sk1 = 0;
    flag = 0;
    dxy = 0;
    /* p_are1/p_are2 are ping-pong buffers grown by icvMemCopy when the
       number of partial areas exceeds p_max. */
    p_are1 = (double *) cvAlloc( p_max * sizeof( double ));
    p_are = p_are1;
    p_are2 = NULL;

    cvStartReadSeq( contour, &reader, 0 );
    cvSetSeqReaderPos( &reader, slice.start_index );
    CV_READ_SEQ_ELEM( pt_s, reader );
    p_ind = 0;
    cvSetSeqReaderPos( &reader, slice.end_index );
    CV_READ_SEQ_ELEM( pt_e, reader );

    /* normal coefficients of the chord line through pt_s and pt_e */
    nx = pt_s.y - pt_e.y;
    ny = pt_e.x - pt_s.x;
    cvSetSeqReaderPos( &reader, slice.start_index );

    while( lpt-- > 0 )
    {
        CV_READ_SEQ_ELEM( pt, reader );

        if( flag == 0 )
        {
            /* First point: just seed the running coordinates. */
            xi_1 = (double) pt.x;
            yi_1 = (double) pt.y;
            x0 = xi_1;
            y0 = yi_1;
            sk1 = 0;
            flag = 1;
        }
        else
        {
            xi = (double) pt.x;
            yi = (double) pt.y;

            /**************** edges intersection examination **************************/
            /* sk is the signed distance of (xi, yi) from the chord line;
               a sign change (or a point on the line) means the polyline
               crossed the chord and a partial area must be closed off. */
            sk = nx * (xi - pt_s.x) + ny * (yi - pt_s.y);
            if( (fabs( sk ) < eps && lpt > 0) || sk * sk1 < -eps )
            {
                if( fabs( sk ) < eps )
                {
                    /* Current point lies on the chord: close the partial
                       area at this vertex. */
                    dxy = xi_1 * yi - xi * yi_1;
                    a00 = a00 + dxy;
                    dxy = xi * y0 - x0 * yi;
                    a00 = a00 + dxy;

                    if( p_ind >= p_max )
                        icvMemCopy( &p_are1, &p_are2, &p_are, &p_max );

                    p_are[p_ind] = a00 / 2.;
                    p_ind++;
                    a00 = 0;
                    sk1 = 0;
                    x0 = xi;
                    y0 = yi;
                    dxy = 0;
                }
                else
                {
                    /* define intersection point */
                    dv = yi - yi_1;
                    du = xi - xi_1;
                    dx = ny;
                    dy = -nx;
                    if( fabs( du ) > eps )
                        t = ((yi_1 - pt_s.y) * du + dv * (pt_s.x - xi_1)) /
                            (du * dy - dx * dv);
                    else
                        t = (xi_1 - pt_s.x) / dx;
                    if( t > eps && t < 1 - eps )
                    {
                        /* Close the partial area at the interpolated
                           crossing point (x_s, y_s). */
                        x_s = pt_s.x + t * dx;
                        y_s = pt_s.y + t * dy;
                        dxy = xi_1 * y_s - x_s * yi_1;
                        a00 += dxy;
                        dxy = x_s * y0 - x0 * y_s;
                        a00 += dxy;
                        if( p_ind >= p_max )
                            icvMemCopy( &p_are1, &p_are2, &p_are, &p_max );

                        p_are[p_ind] = a00 / 2.;
                        p_ind++;

                        a00 = 0;
                        sk1 = 0;
                        x0 = x_s;
                        y0 = y_s;
                        dxy = x_s * yi - xi * y_s;
                    }
                }
            }
            else
                /* No crossing: accumulate the ordinary shoelace term. */
                dxy = xi_1 * yi - xi * yi_1;

            a00 += dxy;

            xi_1 = xi;
            yi_1 = yi;
            sk1 = sk;
        }
    }

    /* Close the final partial area back to its starting point (x0, y0). */
    xi = x0;
    yi = y0;
    dxy = xi_1 * yi - xi * yi_1;

    a00 += dxy;

    if( p_ind >= p_max )
        icvMemCopy( &p_are1, &p_are2, &p_are, &p_max );

    p_are[p_ind] = a00 / 2.;
    p_ind++;

    // common area calculation
    area = 0;
    for( i = 0; i < p_ind; i++ )
        area += fabs( p_are[i] );

    /* Only one of the ping-pong buffers is live after icvMemCopy grows. */
    if( p_are1 != NULL )
        cvFree( &p_are1 );
    else if( p_are2 != NULL )
        cvFree( &p_are2 );

    return area;
}
/* Creates a trackbar (slider) attached to a named window, or updates an
 * existing trackbar with the same name.
 *
 * trackbar_name - slider label; must be non-NULL.
 * window_name   - name of an existing window; must be non-NULL.  If the
 *                 window does not exist the call is a silent no-op
 *                 (returns via EXIT with result == 0).
 * val           - optional int the slider reads its initial position from
 *                 and writes its position to (clamped to [0, count]).
 * count         - maximal slider value; must be > 0.
 * on_notify     - optional callback invoked on position changes.
 *
 * Returns 1 on success, 0 otherwise.
 */
CV_IMPL int
cvCreateTrackbar( const char* trackbar_name, const char* window_name,
                  int* val, int count, CvTrackbarCallback on_notify )
{
    int result = 0;

    CV_FUNCNAME( "cvCreateTrackbar" );

    __BEGIN__;

    /*char slider_name[32];*/
    CvWindow* window = 0;
    CvTrackbar* trackbar = 0;
    int just_created = 0;   /* nonzero if this call allocated the trackbar */

    if( !window_name || !trackbar_name )
        CV_ERROR( CV_StsNullPtr, "NULL window or trackbar name" );

    if( count <= 0 )
        CV_ERROR( CV_StsOutOfRange, "Bad trackbar maximal value" );

    window = icvFindWindowByName(window_name);
    if( !window )
        EXIT;

    trackbar = icvFindTrackbarByName(window,trackbar_name);

    CV_LOCK_MUTEX();

    if( !trackbar )
    {
        /* One allocation holds the struct plus the name right after it. */
        int len = strlen(trackbar_name);
        trackbar = (CvTrackbar*)cvAlloc(sizeof(CvTrackbar) + len + 1);
        memset( trackbar, 0, sizeof(*trackbar));
        trackbar->signature = CV_TRACKBAR_MAGIC_VAL;
        trackbar->name = (char*)(trackbar+1);
        memcpy( trackbar->name, trackbar_name, len + 1 );
        trackbar->parent = window;
        trackbar->next = window->toolbar.first;
        window->toolbar.first = trackbar;

        /* label + slider packed horizontally, added to the window's vbox */
        GtkWidget* hscale_box = gtk_hbox_new( FALSE, 10 );
        GtkWidget* hscale_label = gtk_label_new( trackbar_name );
        GtkWidget* hscale = gtk_hscale_new_with_range( 0, count, 1 );
        gtk_range_set_update_policy( GTK_RANGE(hscale), GTK_UPDATE_CONTINUOUS );
        gtk_scale_set_digits( GTK_SCALE(hscale), 0 );
        //gtk_scale_set_value_pos( hscale, GTK_POS_TOP );
        gtk_scale_set_draw_value( GTK_SCALE(hscale), TRUE );

        trackbar->widget = hscale;
        gtk_box_pack_start( GTK_BOX(hscale_box), hscale_label, FALSE, FALSE, 5 );
        gtk_widget_show( hscale_label );
        gtk_box_pack_start( GTK_BOX(hscale_box), hscale, TRUE, TRUE, 5 );
        gtk_widget_show( hscale );
        gtk_box_pack_start( GTK_BOX(window->paned), hscale_box, FALSE, FALSE, 5 );
        gtk_widget_show( hscale_box );

        just_created = 1;
    }

    if( val )
    {
        /* Seed the slider from *val, clamped to the valid range. */
        int value = *val;
        if( value < 0 )
            value = 0;
        if( value > count )
            value = count;
        gtk_range_set_value( GTK_RANGE(trackbar->widget), value );
        trackbar->pos = value;
        trackbar->data = val;
    }

    trackbar->maxval = count;
    trackbar->notify = on_notify;

    /* FIX: connect the "value-changed" handler only when the trackbar was
       created by this call.  Previously the handler was connected on every
       call, so repeated cvCreateTrackbar() invocations for the same slider
       stacked duplicate handlers and fired the callback multiple times per
       movement.  Connecting after the initial gtk_range_set_value above
       preserves the original "no callback on creation" behavior. */
    if( just_created )
        gtk_signal_connect( GTK_OBJECT(trackbar->widget), "value-changed",
                            GTK_SIGNAL_FUNC(icvOnTrackbar), trackbar );

    // queue a widget resize to trigger a window resize to
    // compensate for the addition of trackbars
    gtk_widget_queue_resize( GTK_WIDGET(window->widget) );

    CV_UNLOCK_MUTEX();

    result = 1;

    __END__;

    return result;
}
int main(int argc, char* argv[]) { CvMemStorage *contStorage = cvCreateMemStorage(0); CvSeq *contours; CvTreeNodeIterator polyIterator; CvMemStorage *mallet_storage; CvSeq *mallet_circles = 0; float *mallet_p; int mi; int found = 0; int i; CvPoint poly_point; int fps=30; int npts[2] = { 4, 12 }; CvPoint **pts; pts = (CvPoint **) cvAlloc (sizeof (CvPoint *) * 2); pts[0] = (CvPoint *) cvAlloc (sizeof (CvPoint) * 4); pts[1] = (CvPoint *) cvAlloc (sizeof (CvPoint) * 12); pts[0][0] = cvPoint(0,0); pts[0][1] = cvPoint(160,0); pts[0][2] = cvPoint(320,240); pts[0][3] = cvPoint(0,240); pts[1][0] = cvPoint(39,17); pts[1][1] = cvPoint(126,15); pts[1][2] = cvPoint(147,26); pts[1][3] = cvPoint(160,77); pts[1][4] = cvPoint(160,164); pts[1][5] = cvPoint(145,224); pts[1][6] = cvPoint(125,233); pts[1][7] = cvPoint(39,233); pts[1][8] = cvPoint(15,217); pts[1][9] = cvPoint(0,133); pts[1][10] = cvPoint(0,115); pts[1][11] = cvPoint(17,28); // ポリライン近似 CvMemStorage *polyStorage = cvCreateMemStorage(0); CvSeq *polys, *poly; // OpenCV variables CvFont font; printf("start!\n"); //pwm initialize if(gpioInitialise() < 0) return -1; //pigpio CW/CCW pin setup //X:18, Y1:14, Y2:15 gpioSetMode(18, PI_OUTPUT); gpioSetMode(14, PI_OUTPUT); gpioSetMode(15, PI_OUTPUT); //pigpio pulse setup //X:25, Y1:23, Y2:24 gpioSetMode(25, PI_OUTPUT); gpioSetMode(23, PI_OUTPUT); gpioSetMode(24, PI_OUTPUT); //limit-switch setup gpioSetMode(5, PI_INPUT); gpioWrite(5, 0); gpioSetMode(6, PI_INPUT); gpioWrite(6, 0); gpioSetMode(7, PI_INPUT); gpioWrite(7, 0); gpioSetMode(8, PI_INPUT); gpioWrite(8, 0); gpioSetMode(13, PI_INPUT); gpioSetMode(19, PI_INPUT); gpioSetMode(26, PI_INPUT); gpioSetMode(21, PI_INPUT); CvCapture* capture_robot_side = cvCaptureFromCAM(0); CvCapture* capture_human_side = cvCaptureFromCAM(1); if(capture_robot_side == NULL){ std::cout << "Robot Side Camera Capture FAILED" << std::endl; return -1; } if(capture_human_side ==NULL){ std::cout << "Human Side Camera Capture FAILED" << std::endl; return -1; } 
// size設定 cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FRAME_WIDTH,CAM_PIX_WIDTH); cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FRAME_HEIGHT,CAM_PIX_HEIGHT); cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FRAME_WIDTH,CAM_PIX_WIDTH); cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FRAME_HEIGHT,CAM_PIX_HEIGHT); //fps設定 cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FPS,fps); cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FPS,fps); // 画像の表示用ウィンドウ生成 //cvNamedWindow("Previous Image", CV_WINDOW_AUTOSIZE); cvNamedWindow("Now Image", CV_WINDOW_AUTOSIZE); cvNamedWindow("pack", CV_WINDOW_AUTOSIZE); cvNamedWindow("mallet", CV_WINDOW_AUTOSIZE); cvNamedWindow ("Poly", CV_WINDOW_AUTOSIZE); //Create trackbar to change brightness int iSliderValue1 = 50; cvCreateTrackbar("Brightness", "Now Image", &iSliderValue1, 100); //Create trackbar to change contrast int iSliderValue2 = 50; cvCreateTrackbar("Contrast", "Now Image", &iSliderValue2, 100); //pack threthold 0, 50, 120, 220, 100, 220 int iSliderValuePack1 = 54; //80; cvCreateTrackbar("minH", "pack", &iSliderValuePack1, 255); int iSliderValuePack2 = 84;//106; cvCreateTrackbar("maxH", "pack", &iSliderValuePack2, 255); int iSliderValuePack3 = 100;//219; cvCreateTrackbar("minS", "pack", &iSliderValuePack3, 255); int iSliderValuePack4 = 255;//175; cvCreateTrackbar("maxS", "pack", &iSliderValuePack4, 255); int iSliderValuePack5 = 0;//29; cvCreateTrackbar("minV", "pack", &iSliderValuePack5, 255); int iSliderValuePack6 = 255;//203; cvCreateTrackbar("maxV", "pack", &iSliderValuePack6, 255); //mallet threthold 0, 255, 100, 255, 140, 200 int iSliderValuemallet1 = 107; cvCreateTrackbar("minH", "mallet", &iSliderValuemallet1, 255); int iSliderValuemallet2 = 115; cvCreateTrackbar("maxH", "mallet", &iSliderValuemallet2, 255); int iSliderValuemallet3 = 218;//140 cvCreateTrackbar("minS", "mallet", &iSliderValuemallet3, 255); int iSliderValuemallet4 = 255; cvCreateTrackbar("maxS", "mallet", &iSliderValuemallet4, 
255); int iSliderValuemallet5 = 0; cvCreateTrackbar("minV", "mallet", &iSliderValuemallet5, 255); int iSliderValuemallet6 = 255; cvCreateTrackbar("maxV", "mallet", &iSliderValuemallet6, 255); // 画像ファイルポインタの宣言 IplImage* img_robot_side = cvQueryFrame(capture_robot_side); IplImage* img_human_side = cvQueryFrame(capture_human_side); IplImage* img_all_round = cvCreateImage(cvSize(CAM_PIX_WIDTH, CAM_PIX_2HEIGHT), IPL_DEPTH_8U, 3); IplImage* tracking_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3); IplImage* img_all_round2 = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3); IplImage* show_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3); cv::Mat mat_frame1; cv::Mat mat_frame2; cv::Mat dst_img_v; cv::Mat dst_bright_cont; int iBrightness = iSliderValue1 - 50; double dContrast = iSliderValue2 / 50.0; IplImage* dst_img_frame = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3); IplImage* grayscale_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 1); IplImage* poly_tmp = cvCreateImage( cvGetSize( img_all_round), IPL_DEPTH_8U, 1); IplImage* poly_dst = cvCreateImage( cvGetSize( img_all_round), IPL_DEPTH_8U, 3); IplImage* poly_gray = cvCreateImage( cvGetSize(img_all_round),IPL_DEPTH_8U,1); int rotate_times = 0; //IplImage* -> Mat mat_frame1 = cv::cvarrToMat(img_robot_side); mat_frame2 = cv::cvarrToMat(img_human_side); //上下左右を反転。本番環境では、mat_frame1を反転させる cv::flip(mat_frame1, mat_frame1, 0); //水平軸で反転(垂直反転) cv::flip(mat_frame1, mat_frame1, 1); //垂直軸で反転(水平反転) vconcat(mat_frame2, mat_frame1, dst_img_v); dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); //1枚にした画像をコンバート //画像の膨張と縮小 // cv::Mat close_img; // cv::Mat element(3,3,CV_8U, cv::Scalar::all(255)); // cv::morphologyEx(dst_img_v, close_img, cv::MORPH_CLOSE, element, cv::Point(-1,-1), 3); // cv::imshow("morphologyEx", dst_img_v); // dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); //1枚にした画像をコンバート //明るさ調整した結果を変換(Mat->IplImage*)して渡す。その後解放。 
*img_all_round = dst_bright_cont; cv_ColorExtraction(img_all_round, dst_img_frame, CV_BGR2HSV, 0, 11, 180, 255, 0, 255); cvCvtColor(dst_img_frame, grayscale_img, CV_BGR2GRAY); cv_Labelling(grayscale_img, tracking_img); cvCvtColor(tracking_img, poly_gray, CV_BGR2GRAY); cvCopy( poly_gray, poly_tmp); cvCvtColor( poly_gray, poly_dst, CV_GRAY2BGR); //画像の膨張と縮小 //cvMorphologyEx(tracking_img, tracking_img,) // 輪郭抽出 found = cvFindContours( poly_tmp, contStorage, &contours, sizeof( CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE); // ポリライン近似 polys = cvApproxPoly( contours, sizeof( CvContour), polyStorage, CV_POLY_APPROX_DP, 8, 10); cvInitTreeNodeIterator( &polyIterator, ( void*)polys, 10); poly = (CvSeq *)cvNextTreeNode( &polyIterator); printf("sort before by X\n"); for( i=0; i<poly->total; i++) { poly_point = *( CvPoint*)cvGetSeqElem( poly, i); cvCircle( poly_dst, poly_point, 1, CV_RGB(255, 0 , 255), -1); cvCircle( poly_dst, poly_point, 8, CV_RGB(255, 0 , 255)); std::cout << "x:" << poly_point.x << ", y:" << poly_point.y << std::endl; } printf("Poly FindTotal:%d\n",poly->total); //枠の座標決定 //左上 の 壁サイド側 upper_left_f //左上 の ゴール寄り upper_left_g //右上 の 壁サイド側 upper_right_f //右上 の ゴール寄り upper_right_g //左下 の 壁サイド側 lower_left_f //左下 の ゴール寄り lower_left_g //右下 の 壁サイド側 lower_right_f //右下 の ゴール寄り lower_right_g CvPoint upper_left_f, upper_left_g, upper_right_f, upper_right_g, lower_left_f, lower_left_g, lower_right_f, lower_right_g, robot_goal_left, robot_goal_right; CvPoint frame_points[8]; // if(poly->total == 8){ // for( i=0; i<8; i++){ // poly_point = *( CvPoint*)cvGetSeqElem( poly, i); // frame_points[i] = poly_point; // } // qsort(frame_points, 8, sizeof(CvPoint), compare_cvpoint); // printf("sort after by X\n"); // for( i=0; i<8; i++){ // std::cout << "x:" << frame_points[i].x << ", y:" << frame_points[i].y << std::endl; // } // if(frame_points[0].y < frame_points[1].y){ // upper_left_f = frame_points[0]; // lower_left_f = frame_points[1]; // } // else{ // upper_left_f = 
frame_points[1]; // lower_left_f = frame_points[0]; // } // if(frame_points[2].y < frame_points[3].y){ // upper_left_g = frame_points[2]; // lower_left_g = frame_points[3]; // } // else{ // upper_left_g = frame_points[3]; // lower_left_g = frame_points[2]; // } // if(frame_points[4].y < frame_points[5].y){ // upper_right_g = frame_points[4]; // lower_right_g = frame_points[5]; // } // else{ // upper_right_g = frame_points[5]; // lower_right_g = frame_points[4]; // } // if(frame_points[6].y < frame_points[7].y){ // upper_right_f = frame_points[6]; // lower_right_f = frame_points[7]; // } // else{ // upper_right_f = frame_points[7]; // lower_right_f = frame_points[6]; // } // } // else{ printf("Frame is not 8 Point\n"); upper_left_f = cvPoint(26, 29); upper_right_f = cvPoint(136, 29); lower_left_f = cvPoint(26, 220); lower_right_f = cvPoint(136, 220); upper_left_g = cvPoint(38, 22); upper_right_g = cvPoint(125, 22); lower_left_g = cvPoint(38, 226); lower_right_g = cvPoint(125, 226); robot_goal_left = cvPoint(60, 226); robot_goal_right = cvPoint(93, 226); // cvCopy(img_all_round, show_img); // cvLine(show_img, upper_left_f, upper_right_f, CV_RGB( 255, 255, 0 )); // cvLine(show_img, lower_left_f, lower_right_f, CV_RGB( 255, 255, 0 )); // cvLine(show_img, upper_right_f, lower_right_f, CV_RGB( 255, 255, 0 )); // cvLine(show_img, upper_left_f, lower_left_f, CV_RGB( 255, 255, 0 )); // // cvLine(show_img, upper_left_g, upper_right_g, CV_RGB( 0, 255, 0 )); // cvLine(show_img, lower_left_g, lower_right_g, CV_RGB( 0, 255, 0 )); // cvLine(show_img, upper_right_g, lower_right_g, CV_RGB( 0, 255, 0 )); // cvLine(show_img, upper_left_g, lower_left_g, CV_RGB( 0, 255, 0 )); //while(1){ //cvShowImage("Now Image", show_img); //cvShowImage ("Poly", poly_dst); //if(cv::waitKey(1) >= 0) { //break; //} //} //return -1; // } printf("upper_left_fX:%d, Y:%d\n",upper_left_f.x, upper_left_f.y); printf("upper_left_gX:%d, Y:%d\n",upper_left_g.x, upper_left_g.y); printf("upper_right_fX:%d,Y:%d\n", 
upper_right_f.x, upper_right_f.y); printf("upper_right_gX:%d, Y:%d\n" , upper_right_g.x, upper_right_g.y); printf("lower_left_fX:%d, Y:%d\n", lower_left_f.x, lower_left_f.y); printf("lower_left_gX:%d, Y:%d\n", lower_left_g.x, lower_left_g.y); printf("lower_right_fX:%d, Y:%d\n", lower_right_f.x, lower_right_f.y); printf("lower_right_gX:%d, Y:%d\n", lower_right_g.x, lower_right_g.y); printf("robot_goal_left:%d, Y:%d\n", robot_goal_left.x, robot_goal_left.y); printf("robot_goal_right:%d, Y:%d\n", robot_goal_right.x, robot_goal_right.y); cvReleaseImage(&dst_img_frame); cvReleaseImage(&grayscale_img); cvReleaseImage(&poly_tmp); cvReleaseImage(&poly_gray); cvReleaseMemStorage(&contStorage); cvReleaseMemStorage(&polyStorage); //return 1; // Init font cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, 0.4,0.4,0,1); bool is_pushed_decision_button = 1;//もう一方のラズパイ信号にする while(1){ //決定ボタンが押されたらスタート if(gpioRead(8)==0 && is_pushed_decision_button==1){ cvCopy(img_all_round, img_all_round2); cvCopy(img_all_round, show_img); img_robot_side = cvQueryFrame(capture_robot_side); img_human_side = cvQueryFrame(capture_human_side); //IplImage* -> Mat mat_frame1 = cv::cvarrToMat(img_robot_side); mat_frame2 = cv::cvarrToMat(img_human_side); //上下左右を反転。本番環境では、mat_frame1を反転させる cv::flip(mat_frame1, mat_frame1, 0); //水平軸で反転(垂直反転) cv::flip(mat_frame1, mat_frame1, 1); //垂直軸で反転(水平反転) vconcat(mat_frame2, mat_frame1, dst_img_v); iBrightness = iSliderValue1 - 50; dContrast = iSliderValue2 / 50.0; dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); //1枚にした画像をコンバート //明るさ調整した結果を変換(Mat->IplImage*)して渡す。その後解放。 *img_all_round = dst_bright_cont; mat_frame1.release(); mat_frame2.release(); dst_img_v.release(); cvFillPoly(img_all_round, pts, npts, 2, CV_RGB(0, 0, 0)); IplImage* dst_img_mallet = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3); IplImage* dst_img_pack = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3); IplImage* dst_img2_mallet = 
cvCreateImage(cvGetSize(img_all_round2), IPL_DEPTH_8U, 3); IplImage* dst_img2_pack = cvCreateImage(cvGetSize(img_all_round2), IPL_DEPTH_8U, 3); cv_ColorExtraction(img_all_round, dst_img_pack, CV_BGR2HSV, iSliderValuePack1, iSliderValuePack2, iSliderValuePack3, iSliderValuePack4, iSliderValuePack5, iSliderValuePack6); cv_ColorExtraction(img_all_round, dst_img_mallet, CV_BGR2HSV, iSliderValuemallet1, iSliderValuemallet2, iSliderValuemallet3, iSliderValuemallet4, iSliderValuemallet5, iSliderValuemallet6); cv_ColorExtraction(img_all_round2, dst_img2_pack, CV_BGR2HSV, iSliderValuePack1, iSliderValuePack2, iSliderValuePack3, iSliderValuePack4, iSliderValuePack5, iSliderValuePack6); //CvMoments moment_mallet; CvMoments moment_pack; CvMoments moment_mallet; CvMoments moment2_pack; //cvSetImageCOI(dst_img_mallet, 1); cvSetImageCOI(dst_img_pack, 1); cvSetImageCOI(dst_img_mallet, 1); cvSetImageCOI(dst_img2_pack, 1); //cvMoments(dst_img_mallet, &moment_mallet, 0); cvMoments(dst_img_pack, &moment_pack, 0); cvMoments(dst_img_mallet, &moment_mallet, 0); cvMoments(dst_img2_pack, &moment2_pack, 0); //座標計算 double m00_before = cvGetSpatialMoment(&moment2_pack, 0, 0); double m10_before = cvGetSpatialMoment(&moment2_pack, 1, 0); double m01_before = cvGetSpatialMoment(&moment2_pack, 0, 1); double m00_after = cvGetSpatialMoment(&moment_pack, 0, 0); double m10_after = cvGetSpatialMoment(&moment_pack, 1, 0); double m01_after = cvGetSpatialMoment(&moment_pack, 0, 1); double gX_before = m10_before/m00_before; double gY_before = m01_before/m00_before; double gX_after = m10_after/m00_after; double gY_after = m01_after/m00_after; double m00_mallet = cvGetSpatialMoment(&moment_mallet, 0, 0); double m10_mallet = cvGetSpatialMoment(&moment_mallet, 1, 0); double m01_mallet = cvGetSpatialMoment(&moment_mallet, 0, 1); double gX_now_mallet = m10_mallet/m00_mallet; double gY_now_mallet = m01_mallet/m00_mallet; int target_direction = -1; //目標とする向き 時計回り=1、 反時計回り=0 //円の大きさは全体の1/10で描画 cvCircle(show_img, 
cvPoint(gX_before, gY_before), CAM_PIX_HEIGHT/10, CV_RGB(0,0,255), 6, 8, 0); cvCircle(show_img, cvPoint(gX_now_mallet, gY_now_mallet), CAM_PIX_HEIGHT/10, CV_RGB(0,0,255), 6, 8, 0); cvLine(show_img, cvPoint(gX_before, gY_before), cvPoint(gX_after, gY_after), cvScalar(0,255,0), 2); cvLine(show_img, robot_goal_left, robot_goal_right, cvScalar(0,255,255), 2); printf("gX_after: %f\n",gX_after); printf("gY_after: %f\n",gY_after); printf("gX_before: %f\n",gX_before); printf("gY_before: %f\n",gY_before); printf("gX_now_mallet: %f\n",gX_now_mallet); printf("gY_now_mallet: %f\n",gY_now_mallet); int target_destanceY = CAM_PIX_2HEIGHT - 30; //Y座標の距離を一定にしている。ディフェンスライン。 //パックの移動は直線のため、一次関数の計算を使って、その後の軌跡を予測する。 double a_inclination; double b_intercept; int closest_frequency; int target_coordinateX; int origin_coordinateY; int target_coordinateY; double center_line = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4; int left_frame = (upper_left_f.x + lower_left_f.x)/2; int right_frame = (upper_right_f.x + lower_right_f.x)/2; double y_line = (upper_left_f.y + lower_right_f.y)/3; double waiting_position = (robot_goal_left.x + lower_left_g.x) / 2; if(gY_after - gY_before < -1){ gpioPWM(25, 128); closest_frequency = gpioSetPWMfrequency(25, 600); target_coordinateX = waiting_position; if(waiting_position + 5 < gX_now_mallet){ target_direction = 0;//反時計回り } else if(gX_now_mallet < waiting_position - 5){ target_direction = 1;//時計回り } } /*else if(robot_goal_right.x < gX_now_mallet){ gpioPWM(25, 128); closest_frequency = gpioSetPWMfrequency(25, 1000); target_direction = 0;//反時計回り } else if(gX_now_mallet < robot_goal_left.x){ gpioPWM(25, 128); closest_frequency = gpioSetPWMfrequency(25, 1000); target_direction = 1;//時計回り }*/ else if(y_line < gY_after && y_line > gY_before){ clock_t start = clock(); clock_t end; end = start + 0.5 * (target_coordinateX - robot_goal_left.x) / 10; target_direction = 1; gpioPWM(25, 128); gpioWrite(18, target_direction); closest_frequency = 
gpioSetPWMfrequency(25, 1500); while(end - start < 0);//時間がくるまでループ } else{ gpioPWM(25, 0); closest_frequency = gpioSetPWMfrequency(25, 0); } if(target_direction != -1){ gpioWrite(18, target_direction); } //防御ラインの描画 cvLine(show_img, cvPoint(CAM_PIX_WIDTH, target_destanceY), cvPoint(0, target_destanceY), cvScalar(255,255,0), 2); //マレットの動きの描画 cvLine(show_img, cvPoint((int)gX_now_mallet, (int)gY_now_mallet), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2); /* int amount_movement = target_coordinateX - gX_now_mallet; //reacted limit-switch and target_direction rotate // if(gpioRead(6) == 1){//X軸右 // gpioPWM(25, 128); // closest_frequency = gpioSetPWMfrequency(25, 1500); // target_direction = 0;//反時計回り // printf("X軸右リミット!反時計回り\n"); // } // else if(gpioRead(26) == 1){//X軸左 gpioPWM(25, 128); closest_frequency = gpioSetPWMfrequency(25, 1500); target_direction = 1;//時計回り printf("X軸左リミット!時計回り\n"); } else if(gpioRead(5) == 1){//Y軸右上 gpioPWM(23, 128); gpioSetPWMfrequency(23, 1500); gpioWrite(14, 0); printf("Y軸右上リミット!時計回り\n"); } else if(gpioRead(13) == 1){//Y軸右下 gpioPWM(23, 128); gpioSetPWMfrequency(23, 1500); gpioWrite(14, 1); printf("Y軸右下リミット!反時計回り\n"); } else if(gpioRead(19) == 1){//Y軸左下 gpioPWM(24, 128); gpioSetPWMfrequency(24, 1500); gpioWrite(15, 0); printf("Y軸左下リミット!時計回り\n"); } else if(gpioRead(21) == 1){//Y軸左上 gpioPWM(24, 0); gpioSetPWMfrequency(24, 1500); gpioWrite(15, 1); printf("Y軸左上リミット!反時計回り\n"); } else{ //Y軸固定のため gpioSetPWMfrequency(23, 0); gpioSetPWMfrequency(24, 0); if(amount_movement > 0){ target_direction = 1;//時計回り } else if(amount_movement < 0){ target_direction = 0;//反時計回り } } if(target_direction != -1){ gpioWrite(18, target_direction); } else{ gpioPWM(24, 0); gpioSetPWMfrequency(24, 0); } printf("setting_frequency: %d\n", closest_frequency);*/ // 指定したウィンドウ内に画像を表示する //cvShowImage("Previous Image", img_all_round2); cvShowImage("Now Image", show_img); cvShowImage("pack", dst_img_pack); cvShowImage("mallet", dst_img_mallet); 
cvShowImage ("Poly", poly_dst); cvReleaseImage (&dst_img_mallet); cvReleaseImage (&dst_img_pack); cvReleaseImage (&dst_img2_mallet); cvReleaseImage (&dst_img2_pack); if(cv::waitKey(1) >= 0) { break; } } else{ //リセット信号が来た場合 is_pushed_decision_button = 0; } } gpioTerminate(); cvDestroyAllWindows(); //Clean up used CvCapture* cvReleaseCapture(&capture_robot_side); cvReleaseCapture(&capture_human_side); //Clean up used images cvReleaseImage(&poly_dst); cvReleaseImage(&tracking_img); cvReleaseImage(&img_all_round); cvReleaseImage(&img_human_side); cvReleaseImage(&img_all_round2); cvReleaseImage(&show_img); cvReleaseImage(&img_robot_side); cvFree(&pts[0]); cvFree(&pts[1]); cvFree(pts); return 0; }
#include <visual.hpp>

#ifdef __cplusplus
extern "C" {
#endif

/* Konoha script binding: construct the initial snake contour for a tracker.
 * Reads a KTexture from sfp[1] and a point count from sfp[2], then places
 * `length` points evenly around an ellipse inscribed in the source image
 * (radii = half the image width/height, centred on the image centre).
 *
 * NOTE(review): `contour` is allocated with cvAlloc but is neither stored,
 * returned to the caller, nor freed before the function ends — as written
 * this leaks on every call, and Tracker_track below has no way to reach it.
 * Presumably it was meant to be stashed in a tracker object; confirm against
 * the Konoha binding conventions. */
KMETHOD Tracker_new(CTX ctx, knh_sfp_t *sfp _RIX)
{
	KTexture *t = RawPtr_to(KTexture *, sfp[1]);
	IplImage *src = t->src;            /* image the snake will operate on */
	int length = Int_to(int, sfp[2]);  /* number of contour points */
	CvPoint center;
	center.x = t->src->width / 2;
	center.y = t->src->height / 2;
	CvPoint *contour = (CvPoint *)cvAlloc(sizeof(CvPoint) * length);
	/* distribute points at equal angles around the inscribed ellipse */
	for (int i = 0; i < length; i++) {
		contour[i].x = (int)(center.x * cos(2 * CV_PI * i / length) + center.x);
		contour[i].y = (int)(center.y * sin(2 * CV_PI * i / length) + center.y);
	}
}

/* Konoha script binding: run one active-contour (snake) iteration over the
 * tracked image with a 15x15 search window and a single-iteration
 * termination criterion.
 *
 * NOTE(review): `src`, `contour`, `length` and `snake_param` are not declared
 * in this scope — they look like they should come from tracker state created
 * by Tracker_new. As written this function does not compile; the missing
 * plumbing between the two bindings needs to be restored. */
KMETHOD Tracker_track(CTX ctx, knh_sfp_t *sfp _RIX)
{
	cvSnakeImage(src, contour, length, &snake_param.alpha, &snake_param.beta,
		&snake_param.gamma, CV_VALUE, cvSize(15, 15),
		cvTermCriteria (CV_TERMCRIT_ITER, 1, 0.0), 1);
}