/// **************************************************** /// /// CARTOON FILTER /// /// **************************************************** bool testApp::cvFilterCartoon(ofxCvColorImage &src, ofxCvColorImage &dst, int w, int h) { //CvtColor(src, dst, code) //cv::cvtColor(inputFrame, bgr, CV_BGRA2BGR); // cv::pyrMeanShiftFiltering(bgr.clone(), bgr, sp, sr); // PyrMeanShiftFiltering(src, dst, sp, sr, max_level=1, termcrit=(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 5, 1)) // Temporary storage. IplImage* pyr = cvCreateImage( cvSize(w,h), IPL_DEPTH_8U, 3 ); IplImage* edges = cvCreateImage( cvSize(w,h), IPL_DEPTH_8U, 1 ); IplImage* edgesRgb = cvCreateImage( cvSize(w,h), IPL_DEPTH_8U, 3 ); //cvSet(s, cvScalar(0,0,0)); ofxCvGrayscaleImage tempGrayImg; tempGrayImg.allocate(w, h); tempGrayImg.setFromColorImage(src); //------------------------------ cvPyrMeanShiftFiltering(src.getCvImage(), pyr, 10, 10); // cv::Canny(gray, edges, 150, 150); cvCanny(tempGrayImg.getCvImage(), edges, 150,150); cvCvtColor(edges, edgesRgb, CV_GRAY2RGB); cvAbsDiff(pyr, edgesRgb, pyr); //cvAbsDiff(colorImg.getCvImage(), lastFrame.getCvImage(), colorDiff.getCvImage()); dst.setFromPixels((unsigned char *)pyr->imageData, w, h); return true; }
//-------------------------------------------------------------------------------- void ofxCvGrayscaleImage::setFromCvColorImage( ofxCvColorImage& mom ) { if( matchingROI(getROI(), mom.getROI()) ) { cvCvtColor( mom.getCvImage(), cvImage, CV_RGB2GRAY ); flagImageChanged(); } else { ofLog(OF_LOG_ERROR, "in =, ROI mismatch"); } }
// Warps _source into the display image using the `translate` perspective
// matrix, reallocating the internal image if the source size changed.
void margDisplay::feedImg(ofxCvColorImage& _source) {
    // BUGFIX: the reallocation read from the member `source` instead of the
    // `_source` parameter, and only compared widths. Compare both dimensions
    // and allocate from the actual input.
    if (image.getWidth() != _source.getWidth() || image.getHeight() != _source.getHeight()) {
        image.clear();
        image.allocate(_source.getWidth(), _source.getHeight());
    }
    cvWarpPerspective(_source.getCvImage(), image.getCvImage(), translate);
    image.flagImageChanged();
}
// Chroma-keys src into dst: pixels whose hue is within H±tH AND whose
// saturation is within S±tS are masked out; everything else is copied.
void chromaKeyer::keyImage( ofxCvColorImage & src, ofxCvColorImage & dst, int w, int h )
{
    // Resize the working images if they are not at the right size already.
    if( hsvImage.width != w || hsvImage.height != h) {
        hsvImage.allocate(w,h);
        hueImg.allocate(w,h);
        satImg.allocate(w,h);
        valImg.allocate(w,h);
    }

    // Convert src to HSV color space.
    hsvImage.setFromPixels(src.getPixels(),w,h);
    hsvImage.convertRgbToHsv();

    // Extract the HSV channels to grayscale planes.
    hsvImage.convertToGrayscalePlanarImages(hueImg,satImg,valImg);

    // Walk the interleaved HSV pixels: [i*3] = hue, [i*3+1] = saturation.
    unsigned char * pixelsHue = hsvImage.getPixels();
    unsigned char * dstMask = new unsigned char[w*h];
    for( int i = 0; i < w*h; i++) {
        if( pixelsHue[i*3]   >= H-tH && pixelsHue[i*3]   <= H+tH &&
            pixelsHue[i*3+1] >= S-tS && pixelsHue[i*3+1] <= S+tS ){
            dstMask[i] = 0;     // keyed out
        }else{
            dstMask[i] = 255;   // kept
        }
    }
    hueImg.setFromPixels(dstMask,w,h);

    // Copy only the unmasked pixels, then convert back to RGB.
    cvCopy( hsvImage.getCvImage(),dst.getCvImage(),hueImg.getCvImage());
    dst.flagImageChanged();
    dst.convertHsvToRgb();

    // BUGFIX: dstMask was allocated with new[] — scalar delete was UB.
    delete [] dstMask;
}
/*ipWidth = ipGrabber[i]->getWidth(); ipHeight = ipGrabber[i]->getHeight(); if ((ipWidth > 0) && (ipHeight >0) ) { if ((ipWidth != ipImg[i].getWidth()) || (ipHeight != ipImg[i].getHeight())) { if (ipImg[i].bAllocated) ipImg[i].resize(ipWidth, ipHeight); else ipImg[i].allocate(ipWidth, ipHeight); } if ((ipWidth != outW) || (ipHeight != outH)) { ofxCvColorImage tempIpImg; tempIpImg.allocate(ipWidth, ipHeight); tempIpImg.setFromPixels(ipGrabber[i]->getPixels(), ipWidth, ipHeight); ipImg[i].scaleIntoMe(tempIpImg, OF_INTERPOLATE_NEAREST_NEIGHBOR); } else { ipImg[i].setFromPixels(ipGrabber[i]->getPixels(), ipWidth, ipHeight); } }*/ } } ///***************************************************************** /// MONOCHANNEL MONOBLOB ///******************************************************************/ void testApp::monoCmonoB(){ int numBlobs = lastBlobs.size(); if (numBlobs > 0) { if (bZoomTarget) { left = lastBlobs[0].boundingRect.x * in_analysis_scale; top = lastBlobs[0].boundingRect.y * in_analysis_scale; targW = lastBlobs[0].boundingRect.width * in_analysis_scale; targH = lastBlobs[0].boundingRect.height * in_analysis_scale; // adjust to mantain inAspect ratio int targW_inAspect = targH*inAspect; if (targW < targW_inAspect) { left -= (targW_inAspect-targW)/2; targW = targW_inAspect; } else { int targH_inAspect = targW/inAspect; top -= (targH_inAspect-targH)/2; targH = targH_inAspect; } } else { targW = cropW; targH = cropH; top = lastBlobs[0].centroid.y*in_analysis_scale-targH/2; left = lastBlobs[0].centroid.x*in_analysis_scale-targW/2; } // copyRegion needs variables as argumets int out_left = 0; copyRegion( fullFrame, left, top, targW, targH, outputImg, out_left, out_H_gap, outW, out_H_in_aspect); } } ///***************************************************************** /// MONOCHANNEL MULTIBLOB ///******************************************************************/ void testApp::monoCmultiB(){ int numBlobs = lastBlobs.size(); if (numBlobs == 1) monoCmonoB(); else if 
(numBlobs > 1) { int max_x = 0; int max_y = 0; for (unsigned int i = 0; i < lastBlobs.size(); i++) { left = MIN( left, lastBlobs[i].boundingRect.x) ; top = MIN( top, lastBlobs[i].boundingRect.y) ; max_x = MAX( max_x, lastBlobs[i].boundingRect.x+ lastBlobs[i].boundingRect.width ); max_y = MAX( max_y, lastBlobs[i].boundingRect.y+ lastBlobs[i].boundingRect.height ); } left *= in_analysis_scale; top *= in_analysis_scale; max_x *= in_analysis_scale; max_y *= in_analysis_scale; if (bZoomTarget) { targW = (max_x-left); targH = (max_y-top); // adjust to mantain inAspect ratio int targW_inAspect = targH*inAspect; if (targW < targW_inAspect) { left -= (targW_inAspect-targW)/2; targW = targW_inAspect; } else { int targH_inAspect = targW/inAspect; top -= (targH_inAspect-targH)/2; targH = targH_inAspect; } } else { targW = cropW; targH = cropH; // centroid of all blobs top = (float)(top+max_y)/2; top -= ((float)targH/2); left = (float)(left+max_x)/2; left -= ((float)targW/2); } int out_left = 0; // int out_H = outH; copyRegion( fullFrame, left, top, targW, targH, outputImg, out_left, out_H_gap, outW, out_H_in_aspect); } } ///***************************************************************** /// MULTICHANNEL ///******************************************************************/ void testApp::multiC(){ int numBlobs = lastBlobs.size(); if (numBlobs > 0) { // calculate number of rows & columns needeed float sqroot = sqrtf(numBlobs); float trun = truncf(sqroot); float rnd = roundf(sqroot); if (trun == rnd) rnd +=0.5; else trun += 0.5; int rows = (int)roundf(trun); if (rows <= 0) rows = 1; int cols = (int)roundf(rnd); if (cols <= 0) cols = 1; // calculate channel width and height int channelW = (float)outW/(float)cols; int channelH = channelW/outAspect; int comonGap = (outH-channelH*rows)/2; int channelGap = out_H_gap*((float)channelH/(float)outH); // letterbox black zones height // draw channels to output buffer image for (int i =0; i < numBlobs;i++) { int dx = (i%cols)*channelW; int 
dy = comonGap+(i/cols)*(channelH+channelGap); targW = cropW; targH = cropH; top = 0; left = 0; if (bZoomTarget) { top = lastBlobs[i].boundingRect.y; left = lastBlobs[i].boundingRect.x; targW = lastBlobs[i].boundingRect.width; targH = lastBlobs[i].boundingRect.height; targW = MAX(targW, targH/inAspect); targH = MAX(targH, targW*inAspect); } else { top = lastBlobs[i].centroid.y; left = lastBlobs[i].centroid.x; } // whithout the (float) casting a segmentation fault occurs top = in_out_scale*(float)top; top -= ((float)targH/2); left = in_out_scale*(float)left; left -= ((float)targW/2); copyRegion( fullFrame, left, top, targW, targH, outputImg, dx, dy, channelW, channelH); } } } ///***************************************************************** /// COPY REGION ///******************************************************************/ void testApp::copyRegion(ofxCvColorImage &src, int &sx, int &sy, int &sw, int &sh, ofxCvColorImage &dst, int &dx, int &dy, int &dw, int &dh){ if (sy < 0) sy = 0; else if (sy > src.height-sh) sy = src.height-sh; if (sx < 0) sx = 0; else if (sx > src.width-sw) sx = src.width-sw; src.setROI(sx, sy, sw, sh ); dst.setROI(dx,dy,dw, dh); dst.scaleIntoMe(src, CV_INTER_NN); dst.resetROI(); src.resetROI(); }
//-------------------------------------------------------------- void testApp::update(){ vidGrabber.grabFrame(); if(vidGrabber.isFrameNew()) { colorImg.setFromPixels(vidGrabber.getPixels(), vidGrabber.getWidth(), vidGrabber.getHeight()); colorImg.mirror(false, true); greyImage = colorImg; greyImageSmall.scaleIntoMe(greyImage); haarFinder.findHaarObjects(greyImageSmall); } }
//-------------------------------------------------------------------------------- void ofxCvFloatImage::operator = ( ofxCvColorImage& mom ) { if( mom.width == width && mom.height == height ) { cvCvtColor( mom.getCvImage(), cvImage, CV_RGB2GRAY ); } else { cout << "error in =, images are different sizes" << endl; } }
// Offsets each pixel of sourceImage by a vector read from this image's
// red (horizontal) and blue (vertical) channels, centered on 127, and
// uploads the result into destTexture.
void dfDisplacementMap::applyDisplaceMap(ofxCvColorImage& sourceImage,ofTexture& destTexture,float hscale=0.3, float vscale=0.3){
    unsigned char * displacePixels = this->getPixels();
    unsigned char * pixels = sourceImage.getPixels();
    int displace, hdisplace, vdisplace;
    int totalPixels = height * width * 3;
    // BUGFIX: the row length was hard-coded as 320*3 even though the rest of
    // the function uses the image's real width.
    int rowBytes = width * 3;
    // Value-initialize so pixels whose displacement lands out of range stay
    // black instead of holding uninitialized memory.
    unsigned char * videoDisplaced = new unsigned char[totalPixels]();
    for (int i = 0; i < totalPixels; i += 3){
        hdisplace = (int)((displacePixels[i]     - 127) * hscale);  // x offset
        vdisplace = (int)((displacePixels[i + 2] - 127) * vscale);  // y offset
        // Only displace when the horizontal offset stays inside the row.
        if( i % rowBytes + hdisplace * 3 > 0 && i % rowBytes + hdisplace * 3 < rowBytes ){
            displace = hdisplace + vdisplace * width;
        }else{
            displace = 0;
        }
        displace *= 3;   // pixel offset -> byte offset
        if( i + displace > 0 && i + displace < totalPixels ){
            videoDisplaced[i]     = pixels[i + displace];
            videoDisplaced[i + 1] = pixels[i + displace + 1];
            videoDisplaced[i + 2] = pixels[i + displace + 2];
        }
    }
    destTexture.loadData(videoDisplaced, width, height, GL_RGB);
    // BUGFIX: buffer came from new[] — scalar delete was UB.
    delete [] videoDisplaced;
}
//-------------------------------------------------------------------------------- void ofxCvColorImage::operator += ( ofxCvColorImage& mom ) { if( mom.width == width && mom.height == height ) { cvAdd( cvImage, mom.getCvImage(), cvImageTemp ); swapTemp(); } else { cout << "error in +=, images are different sizes" << endl; } }
void update() { // Update our little offset thingy. offset += 0.01; if (offset > 1) { offset = 0; } // Update our camera. grabber.update(); // If the camera has a new frame to offer us ... if (grabber.isFrameNew()) { // Make a copy of our grabber pixels in the colorImage. colorImage.setFromPixels(grabber.getPixelsRef()); // When we assign a color image to a grayscale image, it is converted automatically. grayscaleImage = colorImage; // If we set learnBackground to true using the keyboard, we'll take a snapshot of // the background and use it to create a clean foreground image. if (learnBackground == true) { // We assign the grayscaleImage to the grayscaleBackgroundImage. grayscaleBackgroundImage = grayscaleImage; // Now we set learnBakground so we won't set a background unless // explicitly directed to with a keyboard command. learnBackground = false; } // Create a difference image by comparing the background and the current grayscale images. grayscaleAbsoluteDifference.absDiff(grayscaleBackgroundImage, grayscaleImage); // Assign grayscaleAbsoluteDifference to the grayscaleBinary image. grayscaleBinary = grayscaleAbsoluteDifference; // Then threshold the grayscale image to create a binary image. grayscaleBinary.threshold(threshold, invert); // Find contours (blobs) that are between the size of 20 pixels and // 1 / 3 * (width * height) of the camera. Also find holes. contourFinder.findContours(grayscaleBinary, 100, (width * height) / 3.0, 10, true); // Get the biggest blob and use it to draw. if (contourFinder.nBlobs > 0) { holePositions.addVertex(contourFinder.blobs[0].boundingRect.getCenter()); } else { holePositions.clear(); } } }
//-------------------------------------------------------------------------------- void ofxCvColorImage::operator *= ( ofxCvColorImage& mom ) { float scalef = 1.0f / 255.0f; if( mom.width == width && mom.height == height ) { cvMul( cvImage, mom.getCvImage(), cvImageTemp, scalef ); swapTemp(); } else { cout << "error in *=, images are different sizes" << endl; } }
// Load the sample photo, mirror it into the OpenCV color buffer, then
// derive a binarized grayscale copy.
void setup(){
    sampleImg.load("sample_img.jpg");
    colorImg.allocate(900, 900);
    colorImg = sampleImg;     // ofImage -> ofxCvColorImage
    grayImg = colorImg;       // color -> grayscale conversion
    grayImg.threshold(200);   // binarize at 200
}
void setup() { ofSetWindowShape(width, height); // Set the window size. grabber.initGrabber(width, height); // Set the grabber size. // Allocate each of our helper images. colorImage.allocate(width, height); grayscaleImage.allocate(width, height); grayscaleBackgroundImage.allocate(width, height); grayscaleAbsoluteDifference.allocate(width, height); grayscaleBinary.allocate(width, height); }
//-------------------------------------------------------------- void testApp::setup(){ vidGrabber.setVerbose(true); vidGrabber.initGrabber(640, 480); colorImg.allocate(vidGrabber.getWidth(), vidGrabber.getHeight()); greyImage.allocate(vidGrabber.getWidth(), vidGrabber.getHeight()); greyImageSmall.allocate(120, 90); haarFinder.setup("haarcascade_frontalface_alt2.xml"); img.loadImage("stevejobs.png"); img.setAnchorPercent(0.5, 0.5); ofEnableAlphaBlending(); }
//-------------------------------------------------------------- void testApp::draw(){ ofSetColor(255, 255, 255); colorImg.draw(0, 0, ofGetWidth(), ofGetHeight()); glPushMatrix(); glScalef(ofGetWidth() / (float)greyImageSmall.getWidth(), ofGetHeight() / (float)greyImageSmall.getHeight(), 1); // haarTracker.draw(0, 0); ofNoFill(); for(int i = 0; i < haarFinder.blobs.size(); i++) { ofRectangle cur = haarFinder.blobs[i].boundingRect; // ofRect(cur.x, cur.y, cur.width, cur.height); int iw = cur.width * 1.4; img.draw(haarFinder.blobs[i].centroid, iw, iw * img.getHeight() / img.getWidth()); } glPopMatrix(); }
//--------------------------------------------------------------------------------- void videoBlob::set(ofxCvBlob myBlob, ofxCvColorImage myImage, ofxCvGrayscaleImage myMask){ memcpy(&blob, &myBlob, sizeof(ofxCvBlob)); // now, let's get the data in, int w = blob.boundingRect.width; int h = blob.boundingRect.height; int imgw = myImage.width; int imgh = myImage.height; int imgx = blob.boundingRect.x; int imgy = blob.boundingRect.y; unsigned char * blobRGBA = new unsigned char [ w * h * 4 ]; unsigned char * colorPixels = myImage.getPixels(); unsigned char * grayPixels = myMask.getPixels(); for (int i = 0; i < w; i++){ for (int j = 0; j < h; j++){ int posTex = (j * w + i)*4; int posGray = ((j+imgy)*imgw + (i + imgx)); int posCol = posGray * 3; blobRGBA[posTex + 0] = colorPixels[posCol + 0]; blobRGBA[posTex + 1] = colorPixels[posCol + 1]; blobRGBA[posTex + 2] = colorPixels[posCol + 2]; blobRGBA[posTex + 3] = grayPixels[posGray]; } } // myTexture.clear(); // myTexture.allocate(w,h,GL_RGBA); unsigned char * black = new unsigned char [ofNextPow2(w) * ofNextPow2(h) * 4]; memset(black, 0, ofNextPow2(w) * ofNextPow2(h) * 4); // myTexture.loadData(black, ofNextPow2(w), ofNextPow2(h), GL_RGBA); // myTexture.loadData(blobRGBA, w, h, GL_RGBA); delete black; delete blobRGBA; pos.x = blob.centroid.x; pos.y = blob.centroid.y; scale = 1; angle = 0; }
void chromaKeyer::getColorFromScreen( int x, int y, int w, ofxCvColorImage & src) { // tried many different things here but it doesnt work... need to fix it cout << " x " << x << " y " << y << endl; if( x > src.width-1 || y > src.height-1 || x < 1 || y < 1 ) return; //ofxCvColorImage temp; //temp.allocate(src.width,src.height); //temp = src; unsigned char * pixels = src.getPixels(); int pix = x * w + y; int r = pixels[ pix ]; int g = pixels[ pix + 1]; int b = pixels[ pix + 2]; setFromRGB( r, g, b); }
//-------------------------------------------------------------------------------- void ofxCvColorImage::scaleIntoMe( ofxCvColorImage& mom, int interpolationMethod){ if ((interpolationMethod != CV_INTER_NN) || (interpolationMethod != CV_INTER_LINEAR) || (interpolationMethod != CV_INTER_AREA) || (interpolationMethod != CV_INTER_CUBIC) ){ printf("error in scaleIntoMe / interpolationMethod, setting to CV_INTER_NN \n"); interpolationMethod = CV_INTER_NN; } cvResize( mom.getCvImage(), cvImage, interpolationMethod ); /* you can pass in: CV_INTER_NN - nearest-neigbor interpolation, CV_INTER_LINEAR - bilinear interpolation (used by default) CV_INTER_AREA - resampling using pixel area relation. It is preferred method for image decimation that gives moire-free results. In case of zooming it is similar to CV_INTER_NN method. CV_INTER_CUBIC - bicubic interpolation. ----> http://opencvlibrary.sourceforge.net/CvReference */ }
//-------------------------------------------------------------- void testApp::setup(){ /************ SET UP BALL ***********************/ // Initialize Ball ballPositionX = 150; ballPositionY = 150; ballVelocityX = ofRandom(-5, 5); ballVelocityY = ofRandom(-5, 5); /************ SET UP BACKGROUND ***********************/ // Set the background ofBackground(0, 0, 0); // Load background picture //mirrorTexture.allocate(camWidth, camHeight, GL_RGB); img.loadImage("space.jpg"); // Load background movie //video.loadMovie("MilkywayLow.mov"); //video.play(); /************ SET UP PARTICLE SYSTEM ***********************/ //Initialize bins int binPower = 2; particleSystem.setup(ofGetWidth(), ofGetHeight(), binPower); kParticles = 2; float padding = 1; float maxVelocity = 1.5; for(int i = 0; i < kParticles * (ofGetWidth() - padding); i++) { float x = ofRandom(padding, ofGetWidth() - padding); float y = ofRandom(padding, ofGetHeight() - padding); float xv = ofRandom(-maxVelocity, maxVelocity); float yv = ofRandom(-maxVelocity, maxVelocity); Particle particle(x, y, xv, yv); particleSystem.add(particle); } timeStep = 1; lineOpacity = 100; pointOpacity = 255; isMousePressed = false; keyAIsDown = false; slowMotion = false; particleNeighborhood = 4; particleRepulsion = 1; centerAttraction = .05; /************ SET UP KINECT ***********************/ ofSetVerticalSync(true); // Initialize Kinect kinect.init(); kinect.setVerbose(true); kinect.enableDepthNearValueWhite(true); kinect.open(); // Set up different camera views colorImg.allocate(kinect.width, kinect.height); grayImage.allocate(kinect.width, kinect.height); grayThresh.allocate(kinect.width, kinect.height); grayThreshFar.allocate(kinect.width, kinect.height); depthOrig.allocate(kinect.getWidth(), kinect.getHeight()); depthProcessed.allocate(kinect.getWidth(), kinect.getHeight()); colorImageRGB.allocate(kinect.getWidth(), kinect.getHeight()); // Establish depth and threshold settings zDepth = 1.0; nearThreshold = 50; 
farThreshold = 180; bThreshWithOpenCV = true; /******** SET UP GUI ***********************/ gui.setup(); gui.config->gridSize.x = 250; gui.addTitle("DEPTH PRE PROCESSING"); gui.addToggle("invert", invert); gui.addToggle("mirror", mirror); gui.addSlider("preBlur", preBlur, 0, 20); gui.addSlider("bottomThreshold", bottomThreshold, 0, 1); gui.addSlider("topThreshold", topThreshold, 0, 1); gui.addSlider("erodeAmount", erodeAmount, 0, 10); gui.addSlider("dilateAmount", dilateAmount, 0, 10); gui.addToggle("dilateBeforeErode", dilateBeforeErode); gui.addTitle("CONTOURS"); gui.addToggle("findHoles", findHoles); gui.addToggle("useApproximation", useApproximation); gui.addSlider("minBlobSize", minBlobSize, 0, 1); gui.addSlider("maxBlobSize", maxBlobSize, 0, 1); gui.addSlider("maxNumBlobs", maxNumBlobs, 0, 100); gui.addTitle("DEPTH TRANSFORM"); gui.addSlider("depthScaling", depthScale, -100, 100); gui.addSlider("depthOffset", depthOffset, -128, 128); gui.addTitle("TRACKING"); gui.addToggle("doKalman", doKalman); gui.addSlider("lerpSpeed", lerpSpeed, 0, 1); gui.addContent("depthOrig", depthOrig).setNewColumn(true); gui.addContent("depthProcessed", depthProcessed); gui.addContent("depthContours", depthContours); gui.loadFromXML(); gui.setDefaultKeys(true); gui.show(); GLfloat position[] = {ofGetWidth() * 0.6, ofGetHeight() * 0.3, -2000, 1.0 }; GLfloat ambientLight[] = { 0.0f, 0.0f, 0.0f, 1.0f }; GLfloat diffuseLight[] = { 1.0f, 1.0f, 1.0f, 1.0f }; GLfloat specularLight[] = { 1.0f, 1.0f, 1.0f, 1.0f }; glLightfv(GL_LIGHT0, GL_POSITION, position); glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight); glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseLight); glLightfv(GL_LIGHT0, GL_SPECULAR, specularLight); GLfloat matShininess[] = { 50.0 }; glMaterialfv(GL_FRONT_AND_BACK, GL_SHININESS, matShininess); glEnable(GL_LIGHT0); glShadeModel (GL_SMOOTH); glColorMaterial ( GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE ) ; glEnable ( GL_COLOR_MATERIAL ) ; // init kalman curPointSmoothed[0] = NULL; 
curPointSmoothed[1] = NULL; curPointSmoothed[2] = NULL; }
void ofApp::setup() { ofEnableSmoothing(); ofEnableAlphaBlending(); ofSetFrameRate(60); ofSetVerticalSync(true); ofEnableDepthTest(); ofEnableAntiAliasing(); memset( dmxData_, 0, DMX_DATA_LENGTH ); //open the device dmxInterface_ = ofxGenericDmx::createDevice(DmxDevice::DMX_DEVICE_RAW); bool opened = dmxInterface_->open(); if ( dmxInterface_ == 0 || !opened ) { printf( "No FTDI Device Found\n" ); } else { printf( "isOpen: %i\n", dmxInterface_->isOpen() ); } printf("ofxGenericDmx addon version: %s.%s\n", ofxGenericDmx::VERSION_MAJOR, ofxGenericDmx::VERSION_MINOR); std::string file = "Lightweave_loops2.json"; std::string columnsFile = "Lightweave_columns2.json"; std::string facesFile = "Lightweave_faces2.json"; bool parsingSuccessful = result.open(file); bool parsingSuccessfulColumn = columnGeometry.open(columnsFile); bool parsingSuccessfulFaces = faceGeometry.open(facesFile); for (int region = 0; region < 6; region++) { string blah = "region" + ofToString(region); for (int rings = 0; rings < result[blah].size(); rings++) { string ring = "ring" + ofToString(rings); for (int pointPos = 0; pointPos < 3; pointPos++) { string point = "point" + ofToString(pointPos); } } } //setupUDP(); camWidth = 320; camHeight = 240; vector<ofVideoDevice> devices = vidGrabber.listDevices(); for (int i = 0; i < devices.size(); i++) { if (devices[i].bAvailable) { ofLogNotice() << devices[i].id << ": " << devices[i].deviceName; } else { ofLogNotice() << devices[i].id << ": " << devices[i].deviceName << " - unavailable "; } } for (int i = 0; i < devices.size(); i++) { if (!devices[i].deviceName.find("USB")) { cout << devices[i].id << endl; pcCams.push_back(devices[i].id); } } vidGrabber.setDeviceID(pcCams[0]); // vidGrabber.setDeviceID(0); vidGrabber.initGrabber(320,240); vidGrabber1.setDeviceID(pcCams[1]); // vidGrabber1.setDeviceID(0); vidGrabber1.initGrabber(320,240); colorImg1.allocate(320,240); grayImage1.allocate(320,240); grayBg1.allocate(320,240); grayDiff1.allocate(320,240); 
colorImg.allocate(320,240); grayImage.allocate(320,240); grayBg.allocate(320,240); grayDiff.allocate(320,240); bLearnBackground = true; bLearnBackground1 = true; threshold = 80; drawOne = false; bottomSwarm.a = 1.1f; bottomSwarm.b = (curWidth/4.0); bottomSwarm.c = 100.0; bottomSwarm.bVel = 1.0; xPos = 0; yPos = 0; zPos = 0; cam.setPosition(result["region0"]["ring0"]["point0"][0].asFloat(),result["region0"]["ring0"]["point0"][1].asFloat(),result["region0"]["ring0"]["point0"][2].asFloat()); cam.lookAt(ofVec3f(result["region0"]["ring1"]["point0"][0].asFloat(),result["region0"]["ring1"]["point0"][1].asFloat(),result["region0"]["ring1"]["point0"][2].asFloat())); cam.rotate(ofRadToDeg(PI/2), 1.0, 0.0, 0.0); cam.setFov(32.0); sphereZPos = 25.9297; sphereXPos = 364.928; for (int i = 0; i < 20; i++) { spheresXPos[i] = ofRandom(result["region0"]["ring0"]["point0"][0].asFloat()-500, result["region0"]["ring0"]["point0"][0].asFloat()+500.0); spheresZPos[i] = ofRandom(result["region0"]["ring0"]["point0"][2].asFloat()-100.0, result["region0"]["ring0"]["point0"][2].asFloat()+100.0); } /* LIGHTING */ ofSetSmoothLighting(true); pointLight.setDiffuseColor( ofColor(0.f, 255.f, 0.f)); pointLight.setSpecularColor( ofColor(255.f, 255.f, 255.f)); pointLight.setPosition(result["region0"]["ring0"]["point0"][0].asFloat(),result["region0"]["ring0"]["point0"][1].asFloat(),result["region0"]["ring0"]["point0"][2].asFloat()); material.setShininess( 64 ); colorHue = ofRandom(0, 250); colorHue2 = ofRandom(0, 250); lightColor.setBrightness( 180.f ); lightColor.set(250,250,210); materialColor.setBrightness(250.f); materialColor.set(100,100,100); lightColor.setHue(colorHue); pointLight.setDiffuseColor(lightColor); materialColor.setHue(colorHue); material.setSpecularColor(materialColor); materialColor.set(255.0,0.0,0.0); columnMaterial.setSpecularColor(materialColor); materialColor.set(55.0,55.0,55.0); peopleMaterial.setSpecularColor(materialColor); cameraColor1.set(0.0, 0.0, 255.0); 
cameraColor2.set(0.0, 0.0, 255.0); columnColor.set(255, 0, 0); activeColor.set(0.0,0.0,255.0); }
void ofApp::draw() { ofBackground(0.0, 0.0, 0.0); ofSetColor(255.0,255.0,255.0); ofFill(); for (int i = 0; i < 10; i++) { ofDrawBitmapString(verbalInstructions[i], 10, (i*20)+600); } if (cameraInfoIsOn) { ofSetColor(255.0, 255.0, 255.0); ofFill(); /* drawing cameras */ ofSetColor(255.0,0.0,0.0); ofDrawBitmapString("Active column", 10, 30+10); ofSetColor(0.0,0.0,255.0); ofDrawBitmapString("Active camera #", 10, 50+10); ofSetColor(255,105,180); ofDrawBitmapString("# of Pedestrians", 10, 60+20); ofSetColor(255.0, 255.0, 255.0); int cnt = 0; for (int i = 300-70; i <= 300+700; i+=70) { if (cnt == 4 || cnt == 5 ) { ofSetColor(cameraColor1); ofDrawBitmapString("["+ofToString(cnt+1)+"]", i+22, 50+10); ofSetColor(255,105,180); ofDrawBitmapString((int)micLevelsTopNew[cnt], i+35, 60+10); cout << i+35 << endl; ofSetColor(255.0, 255.0, 255.0); } else { if (simulationIsOn) { ofSetColor(255,105,180); } else { ofSetColor(100,100,100); } ofDrawBitmapString((int)micLevelsTopNew[cnt], i+35, 60+10); cout << i+35 << endl; ofSetColor(255.0, 255.0, 255.0); ofDrawBitmapString("["+ofToString(cnt+1)+"]", i+22, 50+10); } cnt++; } cout << "" << endl; /* Draw Columns */ cnt = 0; ofFill(); ofSetColor(100.0, 100.0, 100.0); for (int i = 300; i <= 300+700; i+=70) { if (cnt == 4) { ofSetColor(columnColor); ofDrawCircle(i, 45, 7); ofSetColor(100.0, 100.0, 100.0); } else { ofDrawCircle(i, 45, 7); } cnt++; } int max_pos = 0; int max_element = -1000; for (int i = 0; i < 12; i++) { if (micLevelsTopNew[i] > max_element) { max_pos = i; max_element = micLevelsTopNew[i]; } } //ofVec2f btm = absColumnPositionTop[max_pos]; ofVec2f btm = cameraPositionsTop[max_pos]; ofVec2f desired = btm - swarmPosition; float d = sqrt((desired.x*desired.x) + (desired.y+desired.y)); float r = ofMap(ofClamp(d, 0.0, 700.0), 0.0, 700.0, 25.0, 76.5); ofColor swarmColor = ofColor(255.0, 0.0, 255.0); ofSetColor(swarmColor); ofDrawRectangle(swarmPosition.x, 45, 10, 50); ofSetColor(columnColor); ofFill(); ofDrawCircle(578, 270, 14); 
for (int i = 0; i < contourFinder.nBlobs; i++){ contourFinder.blobs[i].draw(238, 150); ofSetColor(255); if(contourFinder.blobs[i].hole){ ofDrawBitmapString("hole", contourFinder.blobs[i].boundingRect.getCenter().x + 360, contourFinder.blobs[i].boundingRect.getCenter().y + 540); } } ofNoFill(); ofSetColor(cameraColor1); ofDrawBitmapString("Camera 5", 238, 140); ofDrawRectangle(238, 150, 340, 240); ofSetColor(255,255,255,50.0); colorImg.draw(238, 150); for (int i = 0; i < contourFinder1.nBlobs; i++){ contourFinder1.blobs[i].draw(578, 150); // draw over the centroid if the blob is a hole ofSetColor(255); if(contourFinder1.blobs[i].hole){ ofDrawBitmapString("hole", contourFinder1.blobs[i].boundingRect.getCenter().x + 360, contourFinder1.blobs[i].boundingRect.getCenter().y + 540); } } ofNoFill(); ofSetColor(cameraColor2); ofDrawBitmapString("Camera 6", 578, 140); ofDrawRectangle(578, 150, 340, 240); ofSetColor(255,255,255,50.0); colorImg1.draw(578, 150); } ofEnableLighting(); pointLight.enable(); material.begin(); ofPushMatrix(); cam.begin(); peopleMaterial.begin(); if (simulationIsOn) { for (int i = 0; i < 20; i++) { ofSpherePrimitive cyl; cyl.setPosition(spheresXPos[i]+=ofRandom(1.5), spheresZPos[i], 10.9297); cyl.set(10, 10); cyl.draw(); if (spheresXPos[i] >= 0.0) { spheresXPos[i] = ofRandom(result["region0"]["ring0"]["point0"][0].asFloat()-500, result["region0"]["ring0"]["point0"][0].asFloat()+500.0); } } } peopleMaterial.end(); ofSetColor(100.0); ofFill(); int ct = 0; for (int region = 0; region < 3; region++) { string reg = "region" + ofToString(region); for (int pointPos = 0; pointPos < 4; pointPos++) { if (region == 1 && pointPos == 3) { } else { string point = "point" + ofToString(pointPos); ofCylinderPrimitive cyl; cyl.setPosition(columnGeometry[reg][point][0].asFloat(), columnGeometry[reg][point][1].asFloat(), columnGeometry[reg][point][2].asFloat()-90); cyl.set(2.0, 130.0); cyl.rotate(90, ofVec3f(1.0, 0.0, 0.0)); if (ct == 4) { columnMaterial.begin(); 
cyl.draw(); columnMaterial.end(); } else { cyl.draw(); } ct++; } } } material.end(); ofDisableLighting(); newDrawRegion(gaussianBottom, 0, 3, false); ofSetColor(155.0, 155.0, 155.0); ofFill(); for (int face = 0; face < 5; face++) { string fac = "face" + ofToString(face); ofPoint p1; ofPoint p2; ofPoint p3; ofPoint p4; p1.set(ofVec3f(faceGeometry[fac]["point0"][0].asFloat(),faceGeometry[fac]["point0"][1].asFloat(),faceGeometry[fac]["point0"][2].asFloat())); p2.set(ofVec3f(faceGeometry[fac]["point1"][0].asFloat(),faceGeometry[fac]["point1"][1].asFloat(),faceGeometry[fac]["point1"][2].asFloat())); p3.set(ofVec3f(faceGeometry[fac]["point2"][0].asFloat(),faceGeometry[fac]["point2"][1].asFloat(),faceGeometry[fac]["point2"][2].asFloat())); p4.set(ofVec3f(faceGeometry[fac]["point3"][0].asFloat(),faceGeometry[fac]["point3"][1].asFloat(),faceGeometry[fac]["point3"][2].asFloat())); ofDrawLine(p1, p2); ofDrawLine(p2, p3); ofDrawLine(p3, p4); ofDrawLine(p4, p1); } cam.end(); ofPopMatrix(); sendToDMX(); vector<ofVideoDevice> devices = vidGrabber.listDevices(); //ofDrawBitmapString(pcCams[0], 400, 50); //ofDrawBitmapString(pcCams[1], 400, 100); }
void ofApp::update() { if (ofGetElapsedTimeMillis() - lastTime >= timeToReset) { lastTime = ofGetElapsedTimeMillis(); bLearnBackground = true; bLearnBackground1 = true; } micLevelsTopNew[4] = contourFinder.nBlobs; micLevelsTopNew[5] = contourFinder1.nBlobs; /* NEW CAMERA CODE */ bool bNewFrame = false; bool bNewFrame1 = false; vidGrabber.update(); bNewFrame = vidGrabber.isFrameNew(); vidGrabber1.update(); bNewFrame1 = vidGrabber.isFrameNew(); if (bNewFrame){ colorImg.setFromPixels(vidGrabber.getPixels(), 320,240); grayImage = colorImg; if (bLearnBackground == true){ grayBg = grayImage; // the = sign copys the pixels from grayImage into grayBg (operator overloading) bLearnBackground = false; } grayDiff.absDiff(grayBg, grayImage); grayDiff.threshold(threshold); contourFinder.findContours(grayDiff, 20, (340*240)/3, 10, true); } if (bNewFrame1){ colorImg1.setFromPixels(vidGrabber1.getPixels(), 320,240); grayImage1 = colorImg1; if (bLearnBackground1 == true){ grayBg1 = grayImage1; bLearnBackground1 = false; } grayDiff1.absDiff(grayBg1, grayImage1); grayDiff1.threshold(threshold); contourFinder1.findContours(grayDiff1, 20, (340*240)/3, 10, true); } switch (ANIMATION_STATE) { case ACTIVATED: { int max_pos = 0; int max_element = -1000; for (int i = 0; i < 12; i++) { if (micLevelsTopNew[i] > max_element) { max_pos = i; max_element = micLevelsTopNew[i]; } } for (int x = 0; x < 1280; x++) { float top = pow(x-bottomSwarm.b,2); float bottom = 2*pow(bottomSwarm.c,2); bottomSwarm.curve[x] = bottomSwarm.a*exp(-(top/bottom)); } ofVec2f norm = swarmPosition; bottomSwarm.b = norm.normalize().x*1280-160; // ofVec2f btm = absColumnPositionTop[max_pos]; ofVec2f btm = cameraPositionsTop[max_pos]; ofVec2f desired = btm - swarmPosition; float d = sqrt((desired.x*desired.x) + (desired.y+desired.y)); desired.normalize(); if (d < 100) { float m = ofMap(d, 0.0, 100.0, 0.0, 4.0); desired *= m; } else { desired *= 4.0; } swarmPosition += desired; /* UPDATE WAVES */ for (int x = 0; x < 1280; x++) 
{ gaussianBottom[x] = ofMap(bottomSwarm.curve[x], 0.0, 1.1, ambientLevel, 255.0); } break; } case DEACTIVATED: { for (int x = 0; x < 1280; x++) { float top = pow(x-bottomSwarm.b,2); float bottom = 2*pow(bottomSwarm.c,2); bottomSwarm.curve[x] = bottomSwarm.a*exp(-(top/bottom)); } swarmPosition += swarmVector; if (swarmPosition.x >= 300+770 || swarmPosition.x <= 300) { swarmVector *= -1.0; } ofVec2f norm = swarmPosition; bottomSwarm.b = norm.normalize().x*1280.0-160; for (int x = 0; x < 1280; x++) { gaussianBottom[x] = ofMap(bottomSwarm.curve[x], 0.0, 1.1, ambientLevel, 255.0); } break; } } int cnt = 0; for (int i = 0; i < 12; i++) { if (i == 4 || i == 5) { } else { micLevelsTopNew[i] = 0.0; } } if (simulationIsOn) { int cntN = 0; for (int region = 3; region < 6; region++) { string reg = "region" + ofToString(region); int numCols; if (region == 4) { numCols = 3; } else { numCols = 4; } /* TODO: Did this get f****d up? */ for (int pointPos = 0; pointPos < numCols; pointPos++) { string point = "point" + ofToString(pointPos); float colX = columnGeometry[reg][point][0].asFloat(); for (int i = 0; i < 20; i++) { if (i == 4 || i == 5) { } else { if (abs(spheresXPos[i]-columnGeometry[reg][point][0].asFloat()) < 100) { micLevelsTopNew[cntN]++; } } } cntN++; } } } }
// Per-frame driver for the dual background model (running-average + codebook,
// after Bradski & Kaehler's "Learning OpenCV" examples). During the initial
// LEARNING_TIME window both models accumulate statistics; afterwards the
// average model is finalized once, and every later frame produces two
// foreground masks (average-based and codebook-based), each cleaned with
// connected components.
void ofxBackground::update(ofxCvColorImage& input){
	float now = ofGetElapsedTimeMillis();

	// get width/height disregarding ROI
	IplImage* ipltemp = input.getCvImage();
	_width = ipltemp->width;
	_height = ipltemp->height;

	if( inputCopy.getWidth() == 0 ) {
		// First frame ever seen: just allocate; processing starts next frame.
		allocate( _width, _height );
	} else if( inputCopy.getWidth() != _width || inputCopy.getHeight() != _height ) {
		// reallocate to new size
		clear();
		allocate( _width, _height );
	} else { //don't do anything unless we have allocated! (and therefore set timeStartedLearning to a safe, non zero value)

		// Work on a copy so the caller's image (and its ROI) are untouched.
		inputCopy = input;
		inputCopy.setROI( input.getROI() );
		yuvImage.setROI( input.getROI() ); //pass on ROI'ness

		// The codebook model operates in YUV space; the average model uses RGB.
		yuvImage.setFromPixels(inputCopy.getPixels(), _width, _height);
		yuvImage.convertRgbToYuv();

		if((now-timeStartedLearning) < LEARNING_TIME){
			//then we should be learning
			//LEARNING THE AVERAGE AND AVG DIFF BACKGROUND
			accumulateBackground(inputCopy.getCvImage());

			//LEARNING THE CODEBOOK BACKGROUND
			// Walk the interleaved 3-channel YUV buffer one pixel at a time,
			// updating that pixel's codebook entry.
			pColor = (uchar *)((yuvImage.getCvImage())->imageData);
			for(int c=0; c<imageLen; c++) {
				cvupdateCodeBook(pColor, cB[c], cbBounds, nChannels);
				pColor += 3;
			}

			//TODO: clear stale entries
			bStatsDone = false;
			bLearning = true;
		}else {
			//its either time to do stats or not
			bLearning = false;
			if(!bStatsDone){
				//do the stats, just the once
				createModelsfromStats(); //create the background model
				bStatsDone = true;
			}else {
				//learn as normal, find the foreground if any
				//FIND FOREGROUND BY AVG METHOD:
				backgroundDiff(inputCopy.getCvImage(),ImaskAVG);
				cvCopy(ImaskAVG,ImaskAVGCC);
				cvconnectedComponents(ImaskAVGCC);

				//FIND FOREGROUND BY CODEBOOK METHOD
				uchar maskPixelCodeBook;
				pColor = (uchar *)((yuvImage.getCvImage())->imageData); //3 channel yuv image
				uchar *pMask = (uchar *)((ImaskCodeBook)->imageData); //1 channel image
				// Per-pixel foreground test against the learned codebook.
				for(int c=0; c<imageLen; c++) {
					maskPixelCodeBook = cvbackgroundDiff(pColor, cB[c], nChannels, minMod, maxMod);
					*pMask++ = maskPixelCodeBook;
					pColor += 3;
				}
				//This part just to visualize bounding boxes and centers if desired
				cvCopy(ImaskCodeBook,ImaskCodeBookCC);
				cvconnectedComponents(ImaskCodeBookCC);

				//TODO: update the learned background pixels....
				//TODO: clear stale codebook entries on a much slower frequency
			}
		}

		// Publish the four result images (operator= copies pixel data into the
		// public ofxCv wrappers).
		backgroundAverage = ImaskAVG;
		backgroundAverageConnectedComponents = ImaskAVGCC;
		backgroundCodebook = ImaskCodeBook;
		backgroundCodeBookConnectedComponents = ImaskCodeBookCC;
	}
}
void draw() { ofSetColor(255); // A few helper variables for layout. int hw = width / 2; // Half width int hh = height / 2; // Half height. int qw = width / 4; // Quarter width. int qh = height / 4; // Quarter height. int lx = 14; // Label offset x. int ly = 20; // Label offset y. grayscaleImage.draw(0, 0, qw, qh); ofDrawBitmapStringHighlight("0. Grayscale", lx, ly); grayscaleBackgroundImage.draw(qw, 0, qw, qh); ofDrawBitmapStringHighlight("1. Background\n (spacebar)", lx + qw, ly); grayscaleAbsoluteDifference.draw(0, qh, qw, qh); ofDrawBitmapStringHighlight("2. Grayscale - Background", lx, ly + qh); grayscaleBinary.draw(qw, qh, qw, qh); ofDrawBitmapStringHighlight("3. Threshold " + ofToString(threshold) + "\n (-/+: change threshold)\n ( i: invert)", lx + qw, ly + qh); // Here we use ofPushMatrix(), ... to scale all of the contours and bounding boxes. ofPushStyle(); ofPushMatrix(); ofTranslate(hw, 0); ofScale(0.5, 0.5, 1); grayscaleBinary.draw(0, 0); contourFinder.draw(); // Draw all of the contours and their bounding boxes. // Draw our line. ofSetColor(ofColor::yellow); holePositions.draw(); ofPopMatrix(); ofDrawBitmapStringHighlight("4. Contours and Bounding Boxes\n Draw a yellow line to follow\n the center of the largest blob.", lx + hw, ly); ofPopStyle(); colorImage.draw(0, 0);//, hw, hh); // ofDrawBitmapStringHighlight("5. Original", lx, ly + hh); for (int i = 0; i < contourFinder.nBlobs; ++i) { ofPolyline contour(contourFinder.blobs[i].pts); // Resample to reduce the resolution. contour = contour.getResampledBySpacing(5); float interpolatedIndex = offset * contour.size(); ofPoint position = contour.getPointAtIndexInterpolated(interpolatedIndex); ofPoint normal = contour.getNormalAtIndexInterpolated(interpolatedIndex); // Make a line pointing normal to the contour. ofPoint lineEnd = position - normal * 30; ofSetColor(ofColor::yellow); contour.draw(); ofLine(position, lineEnd); ofCircle(lineEnd, 2); } }
void ofxOpticalFlowLK :: update ( ofxCvColorImage& source ) { update( source.getPixels(), source.width, source.height, OF_IMAGE_COLOR ); }
//-------------------------------------------------------------------------------- ofxCvColorImage::ofxCvColorImage( const ofxCvColorImage& mom ) { cvCopy( mom.getCvImage(), cvImage, 0 ); }
//--------------------------------------------------------------
// Per-frame update: bounces the demo ball, preprocesses the Kinect depth
// image (invert/mirror/blur/threshold/morphology), finds blobs, and tracks
// a single smoothed 3D point (optionally Kalman-filtered) at the nearest
// spot inside the blob.
void testApp::update(){
	/************ UPDATE BALL ***********************/
	//Update ball position
	ballPositionX += ballVelocityX;
	ballPositionY += ballVelocityY;
	// Bounce off the window edges by reversing velocity.
	if(ballPositionX < 0 || ballPositionX > ofGetWidth()) {
		ballVelocityX *= -1;
	}
	if (ballPositionY < 0 || ballPositionY > ofGetHeight()) {
		ballVelocityY *= -1;
	}

	/************ UPDATE KINECT ***********************/
	kinect.update();

	// get color pixels
	colorImageRGB = kinect.getPixels();

	// get depth pixels
	depthOrig = kinect.getDepthPixels();

	// save original depth, and do some preprocessing
	depthProcessed = depthOrig;
	if(invert) depthProcessed.invert();
	if(mirror) {
		// Mirror all three images so they stay in register with each other.
		depthOrig.mirror(false, true);
		depthProcessed.mirror(false, true);
		colorImageRGB.mirror(false, true);
	}
	// In-place smoothing; kernel size must be odd, hence preBlur*2+1.
	if(preBlur) cvSmooth(depthProcessed.getCvImage(), depthProcessed.getCvImage(), CV_BLUR , preBlur*2+1);
	// Clamp far values (TRUNC) and zero out near values (TOZERO); the
	// 0..1 slider values are scaled to the 8-bit pixel range.
	if(topThreshold) cvThreshold(depthProcessed.getCvImage(), depthProcessed.getCvImage(), topThreshold * 255, 255, CV_THRESH_TRUNC);
	if(bottomThreshold) cvThreshold(depthProcessed.getCvImage(), depthProcessed.getCvImage(), bottomThreshold * 255, 255, CV_THRESH_TOZERO);
	// Morphological cleanup; order is configurable because close (dilate
	// then erode) and open (erode then dilate) remove different noise.
	if(dilateBeforeErode) {
		if(dilateAmount) cvDilate(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, dilateAmount);
		if(erodeAmount) cvErode(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, erodeAmount);
	} else {
		if(erodeAmount) cvErode(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, erodeAmount);
		if(dilateAmount) cvDilate(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, dilateAmount);
	}
	depthProcessed.flagImageChanged();

	// find contours
	// min/max blob sizes are fractions of the frame area (slider^2 * w * h).
	depthContours.findContours(depthProcessed,
							   minBlobSize * minBlobSize * depthProcessed.getWidth() * depthProcessed.getHeight(),
							   maxBlobSize * maxBlobSize * depthProcessed.getWidth() * depthProcessed.getHeight(),
							   maxNumBlobs, findHoles, useApproximation);

	// Clear old attraction points
	attractPts.clear();

	// Find centroid point for each blob area and add an attraction force to it
	for (int i=0; i<depthContours.blobs.size(); i++) {
		attractPts.push_back(new ofPoint(depthContours.blobs[i].centroid));
		//printf("Blob %d: %f %f \n", i, depthContours.blobs[i].centroid.x, depthContours.blobs[i].centroid.y);
	}

	// if one blob found, find nearest point in blob area
	// static: keeps the last value when no blob is present, so curPoint
	// continues lerping toward the previous target.
	static ofxVec3f newPoint;
	if(depthContours.blobs.size() == 1) {
		ofxCvBlob &blob = depthContours.blobs[0];

		// Restrict the search to the blob's bounding box, then restore ROI.
		depthOrig.setROI(blob.boundingRect);

		double minValue, maxValue;
		CvPoint minLoc, maxLoc;
		cvMinMaxLoc(depthOrig.getCvImage(), &minValue, &maxValue, &minLoc, &maxLoc, NULL);

		depthOrig.resetROI();

		// maxLoc is relative to the ROI; convert back to image coordinates.
		newPoint.x = maxLoc.x + blob.boundingRect.x;
		newPoint.y = maxLoc.y + blob.boundingRect.y;
		//		newPoint.z = (maxValue + offset) * depthScale;	// read from depth map
		//printf("Min: %f %f Max: %f %f \n", minLoc.x, minLoc.y, maxLoc.x, maxLoc.y);

		// read directly from distance (in cm)
		// this doesn't seem to work, need to look into it
		newPoint.z = (kinect.getDistanceAt(newPoint) + depthOffset) * depthScale;

		// apply kalman filtering
		if(doKalman) {
			newPoint.x = updateKalman(0, newPoint.x);
			newPoint.y = updateKalman(1, newPoint.y);
			newPoint.z = updateKalman(2, newPoint.z);
		}

	} else {
		// No unique blob: reset all three filter channels.
		clearKalman(0);
		clearKalman(1);
		clearKalman(2);
	}

	// Advance the ring buffer head and ease the current point toward the
	// (possibly filtered) target.
	pointHead = (pointHead + 1) % kNumPoints;
	curPoint += (newPoint - curPoint) * lerpSpeed;
	points[pointHead] = curPoint;
}