//--------------------------------------------------------------------------------
void ofxCvImage::warpIntoMe( ofxCvImage& mom, const ofPoint src[4], const ofPoint dst[4] ){
    if( !bAllocated ){
        ofLogError("ofxCvImage") << "warpIntoMe(): image not allocated";
        return;
    }
    if( !mom.bAllocated ){
        ofLogError("ofxCvImage") << "warpIntoMe(): source image not allocated";
        return;
    }

    if( mom.getCvImage()->nChannels == cvImage->nChannels &&
        mom.getCvImage()->depth == cvImage->depth ) {

        // compute matrix for perspective warping (homography)
        CvPoint2D32f cvsrc[4];
        CvPoint2D32f cvdst[4];
        CvMat* translate = cvCreateMat( 3, 3, CV_32FC1 );
        cvSetZero( translate );
        for (int i = 0; i < 4; i++ ) {
            cvsrc[i].x = src[i].x;
            cvsrc[i].y = src[i].y;
            cvdst[i].x = dst[i].x;
            cvdst[i].y = dst[i].y;
        }
        cvGetPerspectiveTransform( cvsrc, cvdst, translate );  // calculate homography
        cvWarpPerspective( mom.getCvImage(), cvImage, translate );
        flagImageChanged();
        cvReleaseMat( &translate );

    } else {
        ofLogError("ofxCvImage") << "warpIntoMe(): image type mismatch";
    }
}
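//--------------------------------------------------------------------------------
// Example (not part of ofxOpenCv): a minimal sketch of calling warpIntoMe() to
// rectify a quad out of a source image. The names exampleWarp, srcImg and dstImg
// are hypothetical; both images are assumed to be allocated and of matching type,
// as the checks above require.
static void exampleWarp( ofxCvColorImage& srcImg, ofxCvColorImage& dstImg ){
    // four corners of the quad in the source image
    ofPoint src[4] = { ofPoint(100,80), ofPoint(540,60), ofPoint(560,420), ofPoint(90,440) };
    // map them onto the full destination image
    ofPoint dst[4] = { ofPoint(0,0),
                       ofPoint(dstImg.getWidth(),0),
                       ofPoint(dstImg.getWidth(),dstImg.getHeight()),
                       ofPoint(0,dstImg.getHeight()) };
    dstImg.warpIntoMe( srcImg, src, dst );  // perspective-warp the quad into dstImg
}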
//--------------------------------------------------------------------------------
void ofxCvGrayscaleImage::scaleIntoMe( ofxCvImage& mom, int interpolationMethod ){
    //for interpolation you can pass in:
    //CV_INTER_NN     - nearest-neighbor interpolation
    //CV_INTER_LINEAR - bilinear interpolation (used by default)
    //CV_INTER_AREA   - resampling using pixel area relation. It is the preferred method
    //                  for image decimation as it gives moire-free results. In case of
    //                  zooming it is similar to the CV_INTER_NN method.
    //CV_INTER_CUBIC  - bicubic interpolation

    if( mom.getCvImage()->nChannels == cvImage->nChannels &&
        mom.getCvImage()->depth == cvImage->depth ) {

        if ((interpolationMethod != CV_INTER_NN) &&
            (interpolationMethod != CV_INTER_LINEAR) &&
            (interpolationMethod != CV_INTER_AREA) &&
            (interpolationMethod != CV_INTER_CUBIC) ){
            ofLog(OF_LOG_WARNING, "in scaleIntoMe, setting interpolationMethod to CV_INTER_NN");
            interpolationMethod = CV_INTER_NN;
        }
        cvResize( mom.getCvImage(), cvImage, interpolationMethod );
        flagImageChanged();

    } else {
        ofLog(OF_LOG_ERROR, "in scaleIntoMe: mom image type has to match");
    }
}
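//--------------------------------------------------------------------------------
// Example (not part of ofxOpenCv): a minimal sketch of downscaling one grayscale
// image into another with scaleIntoMe(). The names exampleScaleDown, fullImg and
// smallImg are hypothetical; CV_INTER_AREA is chosen because, per the notes above,
// it decimates without moire artifacts.
static void exampleScaleDown( ofxCvGrayscaleImage& fullImg, ofxCvGrayscaleImage& smallImg ){
    // allocate the destination at half the source size
    smallImg.allocate( (int)(fullImg.getWidth() / 2), (int)(fullImg.getHeight() / 2) );
    smallImg.scaleIntoMe( fullImg, CV_INTER_AREA );  // area resampling into smallImg
}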
//--------------------------------------------------------------------------------
void ofxCvImage::operator *= ( ofxCvImage& mom ) {
    if( !mom.bAllocated ){
        ofLogError("ofxCvImage") << "operator*=: mom needs to be allocated";
        return;
    }
    if( !bAllocated ){
        ofLogNotice("ofxCvImage") << "operator*=: allocating to match dimensions: " << mom.getWidth() << " " << mom.getHeight();
        allocate(mom.getWidth(), mom.getHeight());
    }

    if( mom.getCvImage()->nChannels == cvImage->nChannels &&
        mom.getCvImage()->depth == cvImage->depth ) {

        if( matchingROI(getROI(), mom.getROI()) ) {
            float scalef = 1.0f / 255.0f;
            cvMul( cvImage, mom.getCvImage(), cvImageTemp, scalef );
            swapTemp();
            flagImageChanged();
        } else {
            ofLogError("ofxCvImage") << "operator*=: region of interest mismatch";
        }
    } else {
        ofLogError("ofxCvImage") << "operator*=: images type mismatch";
    }
}
//--------------------------------------------------------------------------------
void ofxCvImage::operator &= ( ofxCvImage& mom ) {
    if( !mom.bAllocated ){
        ofLogError("ofxCvImage") << "operator&=: source image not allocated";
        return;
    }
    if( !bAllocated ){
        ofLogNotice("ofxCvImage") << "operator&=: allocating to match dimensions: " << mom.getWidth() << " " << mom.getHeight();
        allocate(mom.getWidth(), mom.getHeight());
    }

    if( mom.getCvImage()->nChannels == cvImage->nChannels &&
        mom.getCvImage()->depth == cvImage->depth ) {

        if( matchingROI(getROI(), mom.getROI()) ) {
            cvAnd( cvImage, mom.getCvImage(), cvImageTemp );
            swapTemp();
            flagImageChanged();
        } else {
            ofLogError("ofxCvImage") << "operator&=: region of interest mismatch";
        }
    } else {
        ofLogError("ofxCvImage") << "operator&=: images need to have matching type";
    }
}
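//--------------------------------------------------------------------------------
// Example (not part of ofxOpenCv): a minimal sketch of combining two images with
// the operators above. The names exampleCombine, imgA, imgB and mask are
// hypothetical; all images are assumed to share dimensions, type and ROI, as the
// checks above require.
static void exampleCombine( ofxCvGrayscaleImage& imgA, ofxCvGrayscaleImage& imgB,
                            ofxCvGrayscaleImage& mask ){
    imgA *= imgB;  // per-pixel multiply, scaled by 1/255 so a value of 255 acts as identity
    imgA &= mask;  // bitwise AND, e.g. keep only pixels where the mask is 255
}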
//--------------------------------------------------------------------------------
void ofxCvBrightnessContrast::setBrightnessAndContrast(ofxCvImage& img, float brightnessAmount, float contrastAmount){
    brightnessVal = MAX(-127, MIN(127, brightnessAmount));
    contrastVal = MAX(-127, MIN(127, contrastAmount));

    unsigned char data[ 256 ];
    CvMat * matrix;
    double delta, a, b;

    matrix = cvCreateMatHeader( 1, 256, CV_8UC1 );
    cvSetData( matrix, data, 0 );

    if ( contrastVal > 0 ) {
        delta = (127.0f * contrastVal) / 128.0f;
        a = 255.0f / ( 255.0f - (delta * 2.0f) );
        b = a * (brightnessVal - delta);
    } else {
        delta = (-128.0f * contrastVal) / 128.0f;
        a = ( 256.0f - (delta * 2.0f) ) / 255.0f;
        b = ( a * brightnessVal ) + delta;
    }

    for( int i = 0; i < 256; i++ ) {
        int value = cvRound( (a * i) + b );
        data[i] = (unsigned char) min( max(0, value), 255 );
    }

    cvLUT( img.getCvImage(), img.getCvImage(), matrix );
    cvReleaseMat( &matrix );
}
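//--------------------------------------------------------------------------------
// Example (not part of ofxOpenCv): a minimal sketch of applying the lookup-table
// adjustment above. The names exampleAdjust and img are hypothetical, and
// ofxCvBrightnessContrast is assumed to be default-constructible; both amounts are
// clamped to [-127, 127], with 0 meaning "no change".
static void exampleAdjust( ofxCvColorImage& img ){
    ofxCvBrightnessContrast bc;
    bc.setBrightnessAndContrast( img, 20.0f, 35.0f );  // brighten slightly, boost contrast
}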
//--------------------------------------------------------------------------------
void ofxCvColorImage::scaleIntoMe( ofxCvImage& mom, int interpolationMethod ){
    if( !bAllocated ){
        ofLogError("ofxCvColorImage") << "scaleIntoMe(): image not allocated";
        return;
    }
    if( !mom.bAllocated ){
        ofLogError("ofxCvColorImage") << "scaleIntoMe(): source image not allocated";
        return;
    }

    //for interpolation you can pass in:
    //CV_INTER_NN     - nearest-neighbor interpolation
    //CV_INTER_LINEAR - bilinear interpolation (used by default)
    //CV_INTER_AREA   - resampling using pixel area relation. It is the preferred method
    //                  for image decimation as it gives moire-free results. In case of
    //                  zooming it is similar to the CV_INTER_NN method.
    //CV_INTER_CUBIC  - bicubic interpolation

    if( mom.getCvImage()->nChannels == cvImage->nChannels &&
        mom.getCvImage()->depth == cvImage->depth ) {

        if ((interpolationMethod != CV_INTER_NN) &&
            (interpolationMethod != CV_INTER_LINEAR) &&
            (interpolationMethod != CV_INTER_AREA) &&
            (interpolationMethod != CV_INTER_CUBIC) ){
            ofLogWarning("ofxCvColorImage") << "scaleIntoMe(): setting interpolationMethod to CV_INTER_NN";
            interpolationMethod = CV_INTER_NN;
        }
        cvResize( mom.getCvImage(), cvImage, interpolationMethod );
        flagImageChanged();

    } else {
        ofLogError("ofxCvColorImage") << "scaleIntoMe(): type mismatch with source image";
    }
}
//--------------------------------------------------------------------------------
void ofxCvImage::warpIntoMe( ofxCvImage& mom, const ofPoint src[4], const ofPoint dst[4] ){
    if( !bAllocated ){
        ofLog(OF_LOG_ERROR, "in warpIntoMe, image not allocated");
        return;
    }
    if( !mom.bAllocated ){
        ofLog(OF_LOG_ERROR, "in warpIntoMe, mom not allocated");
        return;
    }

    if( mom.getCvImage()->nChannels == cvImage->nChannels &&
        mom.getCvImage()->depth == cvImage->depth ) {

        // compute matrix for perspective warping (homography)
        CvPoint2D32f cvsrc[4];
        CvPoint2D32f cvdst[4];
        CvMat* translate = cvCreateMat( 3, 3, CV_32FC1 );
        cvSetZero( translate );
        for (int i = 0; i < 4; i++ ) {
            cvsrc[i].x = src[i].x;
            cvsrc[i].y = src[i].y;
            cvdst[i].x = dst[i].x;
            cvdst[i].y = dst[i].y;
        }
        cvWarpPerspectiveQMatrix( cvsrc, cvdst, translate );  // calculate homography
        cvWarpPerspective( mom.getCvImage(), cvImage, translate );
        flagImageChanged();
        cvReleaseMat( &translate );

    } else {
        ofLog(OF_LOG_ERROR, "in warpIntoMe: mom image type has to match");
    }
}
//--------------------------------------------------------------------------------
void ofxCvImage::operator *= ( ofxCvImage& mom ) {
    if( !mom.bAllocated ){
        ofLog(OF_LOG_ERROR, "in *=, mom needs to be allocated");
        return;
    }
    if( !bAllocated ){
        ofLog(OF_LOG_NOTICE, "in *=, allocating to match dimensions");
        allocate(mom.getWidth(), mom.getHeight());
    }

    if( mom.getCvImage()->nChannels == cvImage->nChannels &&
        mom.getCvImage()->depth == cvImage->depth ) {

        if( matchingROI(getROI(), mom.getROI()) ) {
            float scalef = 1.0f / 255.0f;
            cvMul( cvImage, mom.getCvImage(), cvImageTemp, scalef );
            swapTemp();
            flagImageChanged();
        } else {
            ofLog(OF_LOG_ERROR, "in *=, ROI mismatch");
        }
    } else {
        ofLog(OF_LOG_ERROR, "in *=, images need to have matching type");
    }
}
//--------------------------------------------------------------------------------
void ofxCvImage::operator &= ( ofxCvImage& mom ) {
    if( mom.getCvImage()->nChannels == cvImage->nChannels &&
        mom.getCvImage()->depth == cvImage->depth ) {

        if( matchingROI(getROI(), mom.getROI()) ) {
            cvAnd( cvImage, mom.getCvImage(), cvImageTemp );
            swapTemp();
            flagImageChanged();
        } else {
            ofLog(OF_LOG_ERROR, "in &=, ROI mismatch");
        }
    } else {
        ofLog(OF_LOG_ERROR, "in &=, images need to have matching type");
    }
}
//--------------------------------------------------------------------------------
void ofxCvImage::operator *= ( ofxCvImage& mom ) {
    if( mom.getCvImage()->nChannels == cvImage->nChannels &&
        mom.getCvImage()->depth == cvImage->depth ) {

        if( matchingROI(getROI(), mom.getROI()) ) {
            float scalef = 1.0f / 255.0f;
            cvMul( cvImage, mom.getCvImage(), cvImageTemp, scalef );
            swapTemp();
            flagImageChanged();
        } else {
            ofLog(OF_LOG_ERROR, "in *=, ROI mismatch");
        }
    } else {
        ofLog(OF_LOG_ERROR, "in *=, images need to have matching type");
    }
}
//--------------------------------------------------------------------------------
void ofxCvFloatImage::operator &= ( ofxCvImage& mom ) {
    if( mom.getCvImage()->nChannels == cvImage->nChannels &&
        mom.getCvImage()->depth == cvImage->depth ) {

        if( matchingROI(getROI(), mom.getROI()) ) {
            //this is doing it bit-wise; probably not what we want
            cvAnd( cvImage, mom.getCvImage(), cvImageTemp );
            swapTemp();
            flagImageChanged();
        } else {
            ofLog(OF_LOG_ERROR, "in &=, ROI mismatch");
        }
    } else {
        ofLog(OF_LOG_ERROR, "in &=, images need to have matching type");
    }
}
//--------------------------------------------------------------------------------
void ofxCvFloatImage::operator *= ( ofxCvImage& mom ) {
    if( mom.getCvImage()->nChannels == cvImage->nChannels &&
        mom.getCvImage()->depth == cvImage->depth ) {

        if( pushSetBothToTheirIntersectionROI(*this, mom) ) {
            cvMul( cvImage, mom.getCvImage(), cvImageTemp );
            swapTemp();
            popROI();       //restore previous ROI
            mom.popROI();   //restore previous ROI
            flagImageChanged();
        } else {
            ofLog(OF_LOG_ERROR, "in *=, ROI mismatch");
        }
    } else {
        ofLog(OF_LOG_ERROR, "in *=, images need to have matching type");
    }
}
//--------------------------------------------------------------------------------
void ofxCvFloatImage::operator &= ( ofxCvImage& mom ) {
    if( mom.getCvImage()->nChannels == cvImage->nChannels &&
        mom.getCvImage()->depth == cvImage->depth ) {

        if( pushSetBothToTheirIntersectionROI(*this, mom) ) {
            //this is doing it bit-wise; probably not what we want
            cvAnd( cvImage, mom.getCvImage(), cvImageTemp );
            swapTemp();
            popROI();       //restore previous ROI
            mom.popROI();   //restore previous ROI
            flagImageChanged();
        } else {
            ofLog(OF_LOG_ERROR, "in &=, ROI mismatch");
        }
    } else {
        ofLog(OF_LOG_ERROR, "in &=, images need to have matching type");
    }
}
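//--------------------------------------------------------------------------------
// Example (not part of ofxOpenCv): a minimal sketch of driving the ROI-intersecting
// variants above from user code. The names exampleRoiMultiply, imgA and imgB are
// hypothetical; each image gets its own region of interest, and operator*= as defined
// above restricts the work to the intersection of the two ROIs before restoring them.
static void exampleRoiMultiply( ofxCvFloatImage& imgA, ofxCvFloatImage& imgB ){
    imgA.setROI( 0, 0, 160, 120 );    // operate only on the top-left corner of imgA
    imgB.setROI( 40, 40, 160, 120 );  // a shifted region of imgB
    imgA *= imgB;                     // multiply over the intersection of both ROIs
    imgA.resetROI();
    imgB.resetROI();
}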
//--------------------------------------------------------------------------------
void ofxCvFloatImage::operator *= ( ofxCvImage& mom ) {
    if( mom.getWidth() == 0 || mom.getHeight() == 0 ){
        ofLog(OF_LOG_ERROR, "in *=, mom width or height is 0");
        return;
    }
    if( !bAllocated ){
        ofLog(OF_LOG_ERROR, "in *=, image is not allocated");
        return;
    }

    if( mom.getCvImage()->nChannels == cvImage->nChannels &&
        mom.getCvImage()->depth == cvImage->depth ){

        if( matchingROI(getROI(), mom.getROI()) ) {
            cvMul( cvImage, mom.getCvImage(), cvImageTemp );
            swapTemp();
            flagImageChanged();
        } else {
            ofLog(OF_LOG_ERROR, "in *=, ROI mismatch");
        }
    } else {
        ofLog(OF_LOG_ERROR, "in *=, images need to have matching type");
    }
}
//--------------------------------------------------------------------------------
void ofxCvFloatImage::operator &= ( ofxCvImage& mom ) {
    if( mom.getWidth() == 0 || mom.getHeight() == 0 ){
        ofLog(OF_LOG_ERROR, "in &=, mom width or height is 0");
        return;
    }
    if( !bAllocated ){
        ofLog(OF_LOG_ERROR, "in &=, image is not allocated");
        return;
    }

    if( mom.getCvImage()->nChannels == cvImage->nChannels &&
        mom.getCvImage()->depth == cvImage->depth ){

        if( matchingROI(getROI(), mom.getROI()) ) {
            //this is doing it bit-wise; probably not what we want
            cvAnd( cvImage, mom.getCvImage(), cvImageTemp );
            swapTemp();
            flagImageChanged();
        } else {
            ofLog(OF_LOG_ERROR, "in &=, ROI mismatch");
        }
    } else {
        ofLog(OF_LOG_ERROR, "in &=, images need to have matching type");
    }
}