Example #1
	FREObject updateCameraFrame(FREContext ctx, void* funcData, uint32_t argc, FREObject argv[])
	{
		gGrabber.update();
		if( !gGrabber.isFrameNew() ) return NULL;

		FREObject as3Bitmap = argv[0];

		FREBitmapData bitmapData;

		if( FREAcquireBitmapData(as3Bitmap, &bitmapData) != FRE_OK ) return NULL;

		// Copy the grabber's RGB pixels into the AS3 bitmap as ARGB.
		uint32_t r, g, b;

		unsigned char *pixel = gGrabber.getPixels();

		uint32_t* ptr = bitmapData.bits32;
		uint32_t alpha = 255;
		for( uint32_t j = 0; j < bitmapData.height; j++ ){
			// This sample assumes a bottom-up bitmap, so rows are written in reverse.
			ptr = bitmapData.bits32 + bitmapData.lineStride32*(bitmapData.height-j-1);
			for( uint32_t i = 0; i < bitmapData.width; i++ ){
				r = *pixel++; g = *pixel++; b = *pixel++;
				*ptr++ = (alpha << 24) | (r << 16) | (g << 8) | b;
			}
		}

		FREInvalidateBitmapDataRect(as3Bitmap, 0, 0, bitmapData.width, bitmapData.height);
		FREReleaseBitmapData(as3Bitmap);

		return NULL;
	}
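
For context, a native function like updateCameraFrame reaches ActionScript through the extension's context initializer. Below is a minimal sketch of that registration, assuming a single-function extension; the initializer name is hypothetical.

	// Hypothetical registration of updateCameraFrame with the AIR runtime.
	static FRENamedFunction functions[] = {
		{ (const uint8_t*) "updateCameraFrame", NULL, &updateCameraFrame }
	};

	void cameraContextInitializer(void* extData, const uint8_t* ctxType, FREContext ctx,
	                              uint32_t* numFunctionsToSet, const FRENamedFunction** functionsToSet)
	{
		*numFunctionsToSet = sizeof(functions) / sizeof(functions[0]);
		*functionsToSet = functions;
	}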
Example #2
 void update() {
     cam->update();
     if(cam->isFrameNew()) {
         ofPixels& pix = cam->getPixels();
         int skip = 2;             // sample every other pixel
         int range = mouseX / 25;  // clipping tolerance, controlled by the mouse
         // Mark channels that sit within `range` of full black or full white.
         for(int y = 0; y < pix.getHeight(); y += skip) {
             for(int x = 0; x < pix.getWidth(); x += skip) {
                 ofColor cur = pix.getColor(x, y);
                 ofColor result(0, 0, 0, 0);
                 if(cur.r < range || cur.r > 255-range) {
                     result.r = 255;
                     result.a = 255;
                 }
                 if(cur.g < range || cur.g > 255-range) {
                     result.g = 255;
                     result.a = 255;
                 }
                 if(cur.b < range || cur.b > 255-range) {
                     result.b = 255;
                     result.a = 255;
                 }
                 clipping.setColor(x, y, result);
             }
         }
         clipping.update();
         
         if(recording) {
             string fn = "images/" + ofToString(frameCount, 6, '0') + ".jpg";
             imageSaver.saveImage(pix, fn);
             frameCount++;
         }
     }
 }
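
The members this example relies on live in the surrounding class. A minimal sketch of the assumed state and setup follows; the sizes are assumptions, and the imageSaver is left out since its type isn't shown.

 ofVideoGrabber* cam;
 ofImage clipping;        // RGBA mask of clipped channels
 bool recording = false;
 int frameCount = 0;

 void setup() {
     cam = new ofVideoGrabber();
     cam->setup(640, 480);
     clipping.allocate(640, 480, OF_IMAGE_COLOR_ALPHA);
 }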
Example #3
	void update() {
		cam.update();
		if(cam.isFrameNew()) {
			Mat camMat = toCv(cam);
			tracker.update(camMat);
			trackerDataSave.load(tracker);
		}
	}
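
This pattern uses ofxCv's toCv() together with ofxFaceTracker. A minimal sketch of the setup it assumes (trackerDataSave's type isn't shown, so it is left out):

	ofVideoGrabber cam;
	ofxFaceTracker tracker;

	void setup() {
		cam.initGrabber(640, 480);
		tracker.setup();
	}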
Example #4
	void update()
	{
		video.update();
		
		float t = ofGetElapsedTimef() * 2;
		isf.setUniform<float>("blurAmount", ofNoise(1, 0, 0, t) * 1.5);
		isf.update();
	}
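
Here ofNoise(1, 0, 0, t) returns a smooth value in 0..1, so blurAmount sweeps through 0..1.5. For comparison, the same noise-driven modulation with a core ofShader; the shader member and uniform name are assumptions.

	ofShader blur; // assumed to be loaded in setup() with a "blurAmount" uniform

	void update()
	{
		float t = ofGetElapsedTimef() * 2;
		blur.begin();
		blur.setUniform1f("blurAmount", ofNoise(1, 0, 0, t) * 1.5);
		blur.end();
	}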
Example #5
void update()
{
    // Update our little offset thingy.
    offset += 0.01;

    if (offset > 1) 
    {
        offset = 0;   
    }
    
    // Update our camera.
    grabber.update();
    
    // If the camera has a new frame to offer us ...
    if (grabber.isFrameNew())
    {
        // Make a copy of our grabber pixels in the colorImage.
        colorImage.setFromPixels(grabber.getPixelsRef());

        // When we assign a color image to a grayscale image, it is converted automatically.
        grayscaleImage = colorImage;
		
        // If we set learnBackground to true using the keyboard, we'll take a snapshot of 
        // the background and use it to create a clean foreground image.
        if (learnBackground == true)
        {
            // We assign the grayscaleImage to the grayscaleBackgroundImage.
            grayscaleBackgroundImage = grayscaleImage;

            // Now we set learnBackground to false so we won't set a background
            // unless explicitly directed to with a keyboard command.
            learnBackground = false;
        }

        // Create a difference image by comparing the background and the current grayscale images.
        grayscaleAbsoluteDifference.absDiff(grayscaleBackgroundImage, grayscaleImage);

        // Assign grayscaleAbsoluteDifference to the grayscaleBinary image.
        grayscaleBinary = grayscaleAbsoluteDifference;
        
        // Then threshold the grayscale image to create a binary image.
        grayscaleBinary.threshold(threshold, invert);

        // Find contours (blobs) with an area between 100 pixels and
        // 1/3 of the camera image (width * height), considering at most
        // 10 blobs. The final argument tells the finder to also detect holes.
        contourFinder.findContours(grayscaleBinary, 100, (width * height) / 3.0, 10, true);

        // Get the biggest blob and use it to draw.
        if (contourFinder.nBlobs > 0)
        {
            holePositions.addVertex(contourFinder.blobs[0].boundingRect.getCenter());
        }
        else 
        {
            holePositions.clear();
        }
    }
}
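
For reference, a sketch of the members this background-subtraction example assumes, all standard ofxOpenCv types; the initial values and camera dimensions are assumptions.

ofVideoGrabber grabber;
ofxCvColorImage colorImage;
ofxCvGrayscaleImage grayscaleImage;
ofxCvGrayscaleImage grayscaleBackgroundImage;
ofxCvGrayscaleImage grayscaleAbsoluteDifference;
ofxCvGrayscaleImage grayscaleBinary;
ofxCvContourFinder contourFinder;
ofPolyline holePositions;
bool learnBackground = true; // toggled from keyPressed()
bool invert = false;
int threshold = 80;          // assumed starting threshold
int width = 640;             // assumed camera dimensions
int height = 480;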
Example #6
 void update() {
     
     ofSetWindowTitle(ofToString(ofGetFrameRate()));
     cam.update();
     if(cam.isFrameNew()) {
         // Only run the (expensive) face classification while a key is held down.
         if(ofGetKeyPressed()) {
             results = ccv.classifyFace(cam);
         }
     }
 }
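
classifyFace comes from ofxCcv, which needs a trained network before it can classify. A minimal sketch of the assumed setup; the model file name is an assumption and must match a network shipped with the addon.

 void setup() {
     cam.setup(640, 480);
     ccv.setup("image-net-2012.sqlite3"); // model path is an assumption
 }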
Example #7
 void update() {
     vid.update();
     if(vid.isFrameNew()) {
         unsigned char* pix = vid.getPixels();
         int j = 0;
         for(int i = 0; i < n; i++) {
             // Read one RGB pixel from the camera...
             unsigned char& r = pix[j++];
             unsigned char& g = pix[j++];
             unsigned char& b = pix[j++];
             // ...and use the color itself (offset by 20) as a position in RGB space.
             mesh.setVertex(i, ofVec3f(r+20, g+20, b+20));
             mesh.setColor(i, ofColor(r+20, g+20, b+20));
         }
     }
 }
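
setVertex() and setColor() require the mesh to already hold n entries. A minimal sketch of the assumed setup, pre-filling one point per camera pixel:

 ofVideoGrabber vid;
 ofMesh mesh;
 int n;

 void setup() {
     vid.initGrabber(320, 240);
     n = vid.getWidth() * vid.getHeight();
     mesh.setMode(OF_PRIMITIVE_POINTS);
     for(int i = 0; i < n; i++) {
         mesh.addVertex(ofVec3f());
         mesh.addColor(ofColor());
     }
 }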
Example #8
// Called every frame.
void update() {
    // Update our camera.
    grabber.update();
    
    // If the camera has a new frame to offer us ...
    if (grabber.isFrameNew())
    {
        // Get a reference (denoted by &) to the camera's pixels. Getting a 
        // reference means that we won't have to make a copy of all of the 
        // frame's pixels (since we only need one column anyway). This means our
        // program requires less processing power.
        //
        // const prevents us from accidentally modifying the camera's pixels.
        const ofPixels& cameraPixels = grabber.getPixelsRef();
        
        // Choose a slit location. In this case we'll collect slits from the 
        // column in the middle of the camera feed.
        int slitPositionX = grabber.getWidth() / 2;
        
        // Cycle through each pixel in the selected column and place that pixel
        // at x = xPosition and the same y position as the original.
        for (int y = 0; y < grabber.getHeight(); y++)
        {
            // Get the pixel as a color at x / y from the grabber.
            ofColor pixelFromGrabber = cameraPixels.getColor(slitPositionX, y);
            
            // Set that pixel color to the x / y position in the output pixels.
            pixels.setColor(xPosition, y, pixelFromGrabber);
        }
        
        // Increment our xPosition so next update we'll draw a column shifted to 
        // the right by one pixel.
        xPosition = xPosition + 1;
        
        // If our xPosition is greater than or equal to the width of the display 
        // pixels, reset our x position to 0.
        if (xPosition >= pixels.getWidth())
        {
            xPosition = 0;   
        }
    }
}
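
The slit-scan accumulates into pixels, which must be allocated once and drawn every frame. A minimal sketch of the assumed setup and draw; the texture member is hypothetical.

ofVideoGrabber grabber;
ofPixels pixels;    // accumulating slit-scan output
ofTexture texture;  // hypothetical texture used to display the pixels
int xPosition = 0;

void setup() {
    grabber.initGrabber(640, 480);
    pixels.allocate(640, 480, OF_PIXELS_RGB);
    texture.allocate(pixels);
}

void draw() {
    texture.loadData(pixels);
    texture.draw(0, 0);
}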
Example #9
void ofApp::update() {
    
    if (ofGetElapsedTimeMillis() - lastTime >= timeToReset) {
        lastTime = ofGetElapsedTimeMillis();
        bLearnBackground = true;
        bLearnBackground1 = true;
    }
    
    
    micLevelsTopNew[4] = contourFinder.nBlobs;
    micLevelsTopNew[5] = contourFinder1.nBlobs;
    
    
    /* NEW CAMERA CODE */
    
    bool bNewFrame = false;
    
    bool bNewFrame1 = false;
    
    vidGrabber.update();
    bNewFrame = vidGrabber.isFrameNew();
    
    vidGrabber1.update();
    bNewFrame1 = vidGrabber1.isFrameNew();
    
    if (bNewFrame){
        
        colorImg.setFromPixels(vidGrabber.getPixels(), 320,240);
        
        grayImage = colorImg;
        if (bLearnBackground == true){
            grayBg = grayImage;		// the = sign copies the pixels from grayImage into grayBg (operator overloading)
            bLearnBackground = false;
        }
        grayDiff.absDiff(grayBg, grayImage);
        grayDiff.threshold(threshold);
        contourFinder.findContours(grayDiff, 20, (320*240)/3, 10, true);
    }
    
    if (bNewFrame1){
        
        colorImg1.setFromPixels(vidGrabber1.getPixels(), 320,240);
        
        grayImage1 = colorImg1;
        if (bLearnBackground1 == true){
            grayBg1 = grayImage1;
            bLearnBackground1 = false;
        }
        grayDiff1.absDiff(grayBg1, grayImage1);
        grayDiff1.threshold(threshold);
        contourFinder1.findContours(grayDiff1, 20, (320*240)/3, 10, true);
    }
    
    switch (ANIMATION_STATE) {
        case ACTIVATED: {
            int max_pos = 0;
            int max_element = -1000;
            for (int i = 0; i < 12; i++) {
                if (micLevelsTopNew[i] > max_element) {
                    max_pos = i;
                    max_element = micLevelsTopNew[i];
                }
            }
        
            // Rebuild the Gaussian envelope curve[x] = a * exp(-((x - b)^2 / (2 c^2))).
            for (int x = 0; x < 1280; x++) {
                float top = pow(x-bottomSwarm.b,2);
                float bottom = 2*pow(bottomSwarm.c,2);
                bottomSwarm.curve[x] = bottomSwarm.a*exp(-(top/bottom));
            }
            
            ofVec2f norm = swarmPosition;
            bottomSwarm.b = norm.normalize().x*1280-160;
//            ofVec2f btm = absColumnPositionTop[max_pos];
            ofVec2f btm = cameraPositionsTop[max_pos];
            ofVec2f desired =  btm - swarmPosition;
            float d = sqrt((desired.x*desired.x) + (desired.y*desired.y));
            desired.normalize();
            if (d < 100) {
                float m = ofMap(d, 0.0, 100.0, 0.0, 4.0);
                desired *= m;
            } else {
                desired *= 4.0;
            }

            swarmPosition += desired;
            
            /* UPDATE WAVES */
            for (int x = 0; x < 1280; x++) {
                gaussianBottom[x] = ofMap(bottomSwarm.curve[x], 0.0, 1.1, ambientLevel, 255.0);
            }
            break;
        }
        case DEACTIVATED: {
            
            for (int x = 0; x < 1280; x++) {
                float top = pow(x-bottomSwarm.b,2);
                float bottom = 2*pow(bottomSwarm.c,2);
                bottomSwarm.curve[x] = bottomSwarm.a*exp(-(top/bottom));
            }
            
            swarmPosition += swarmVector;
            if (swarmPosition.x >= 300+770 || swarmPosition.x <= 300) {
                swarmVector *= -1.0;
            }
            
            ofVec2f norm = swarmPosition;
            bottomSwarm.b = norm.normalize().x*1280.0-160;
            
            for (int x = 0; x < 1280; x++) {
                gaussianBottom[x] = ofMap(bottomSwarm.curve[x], 0.0, 1.1, ambientLevel, 255.0);
            }
            break;
        }
    }
    for (int i = 0; i < 12; i++) {
        // Keep the two camera-driven levels (indices 4 and 5); zero the rest.
        if (i != 4 && i != 5) {
            micLevelsTopNew[i] = 0.0;
        }
    }
    if (simulationIsOn) {
        int cntN = 0;
        for (int region = 3; region < 6; region++) {
            string reg = "region" + ofToString(region);
            int numCols;
            if (region == 4) {
                numCols = 3;
            } else {
                numCols = 4;
            }
            /* TODO: Did this get f****d up? */
            for (int pointPos = 0; pointPos < numCols; pointPos++) {
                string point = "point" + ofToString(pointPos);
                float colX = columnGeometry[reg][point][0].asFloat();
                for (int i = 0; i < 20; i++) {
                    if (i == 4 || i == 5) continue;
                    if (fabs(spheresXPos[i] - colX) < 100) {
                        micLevelsTopNew[cntN]++;
                    }
                }
                cntN++;
            }
        }
    }
    

}
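
The Gaussian envelope and its mapping onto gaussianBottom are rebuilt identically in both animation states. A helper like the sketch below (the name is hypothetical) would keep the two branches in sync, called where each per-x loop runs today; merging the two loops is safe because the mapping only reads the curve computed at the top of each branch.

void updateSwarmCurve() {
    for (int x = 0; x < 1280; x++) {
        float top = pow(x - bottomSwarm.b, 2);
        float bottom = 2 * pow(bottomSwarm.c, 2);
        bottomSwarm.curve[x] = bottomSwarm.a * exp(-(top / bottom));
        gaussianBottom[x] = ofMap(bottomSwarm.curve[x], 0.0, 1.1, ambientLevel, 255.0);
    }
}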
Example #10
//--------------------------------------------------------------
void ofApp::update(){
    // Update the camera first; only copy pixels once a fresh frame has arrived.
    vidGrabber.update();
    if (vidGrabber.isFrameNew()) {
        show = vidGrabber.getPixels();
    }
}
Example #11
//--------------------------------------------------------------
void testApp::update(){
	
	// grab new frame
	videoGrabber.update();
	
	// if there is a new frame....
	if(videoGrabber.isFrameNew()) {
		
		// RGB textures don't seem to work well, so copy the grabber data into an RGBA texture.
		unsigned char* grabberPixels = videoGrabber.getPixels(); // fetch once, outside the loop
		int pixelIndex = 0;
		for(int i=0; i<vidWidth; i++) {
			for(int j=0; j<vidHeight; j++) {
				// pixelIndex advances linearly, so this is a straight RGB -> RGBA copy.
				int indexRGB	= pixelIndex * 3;
				int indexRGBA	= pixelIndex * 4;
				
				pixels[indexRGBA  ] = grabberPixels[indexRGB  ];
				pixels[indexRGBA+1] = grabberPixels[indexRGB+1];
				pixels[indexRGBA+2] = grabberPixels[indexRGB+2];
				pixels[indexRGBA+3] = 255;
				pixelIndex++;
			}
		}
		

		// write the new pixel data into the OpenCL Image (and thus the OpenGL texture)
		clImage[activeImageIndex].write(pixels);
		
		
		if(doBlur) {
			msa::OpenCLKernel *kernel = openCL.kernel("msa_boxblur");
			for(int i=0; i<blurAmount; i++) {
				cl_int offset = i * i / 2 + 1;
				kernel->setArg(0, clImage[activeImageIndex].getCLMem());
				kernel->setArg(1, clImage[1-activeImageIndex].getCLMem());
				kernel->setArg(2, offset);
				kernel->run2D(vidWidth, vidHeight);
				activeImageIndex = 1 - activeImageIndex;
			}
		}
		
		if(doFlipX) {
			msa::OpenCLKernel *kernel = openCL.kernel("msa_flipx");
			kernel->setArg(0, clImage[activeImageIndex].getCLMem());
			kernel->setArg(1, clImage[1-activeImageIndex].getCLMem());
			kernel->run2D(vidWidth, vidHeight);
			activeImageIndex = 1 - activeImageIndex;
		}
		
	
		if(doFlipY) {
			msa::OpenCLKernel *kernel = openCL.kernel("msa_flipy");
			kernel->setArg(0, clImage[activeImageIndex].getCLMem());
			kernel->setArg(1, clImage[1-activeImageIndex].getCLMem());
			kernel->run2D(vidWidth, vidHeight);
			activeImageIndex = 1 - activeImageIndex;
		}
		
		if(doGreyscale) {
			msa::OpenCLKernel *kernel = openCL.kernel("msa_greyscale");
			kernel->setArg(0, clImage[activeImageIndex].getCLMem());
			kernel->setArg(1, clImage[1-activeImageIndex].getCLMem());
			kernel->run2D(vidWidth, vidHeight);
			activeImageIndex = 1 - activeImageIndex;
		}
		
		if(doInvert) {
			msa::OpenCLKernel *kernel = openCL.kernel("msa_invert");
			kernel->setArg(0, clImage[activeImageIndex].getCLMem());
			kernel->setArg(1, clImage[1-activeImageIndex].getCLMem());
			kernel->run2D(vidWidth, vidHeight);
			activeImageIndex = 1 - activeImageIndex;
		}
	
		if(doThreshold) {
			msa::OpenCLKernel *kernel = openCL.kernel("msa_threshold");
			kernel->setArg(0, clImage[activeImageIndex].getCLMem());
			kernel->setArg(1, clImage[1-activeImageIndex].getCLMem());
			kernel->setArg(2, threshLevel);
			kernel->run2D(vidWidth, vidHeight);
			activeImageIndex = 1 - activeImageIndex;
		}
		
		
		// calculate capture fps as an exponential moving average
		static float lastTime = 0;
		float nowTime = ofGetElapsedTimef();
		float timeDiff = nowTime - lastTime;
		if(timeDiff > 0 ) captureFPS = 0.9f * captureFPS + 0.1f / timeDiff;
		lastTime = nowTime;
	}
}
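
Every filter block above repeats the same ping-pong pattern: read from the active OpenCL image, write into the other one, swap. A sketch of a helper that factors that out (the name is hypothetical; kernels with extra scalar arguments set them before the call):

	// Hypothetical helper wrapping the recurring ping-pong dispatch.
	void runFilter(msa::OpenCLKernel *kernel) {
		kernel->setArg(0, clImage[activeImageIndex].getCLMem());
		kernel->setArg(1, clImage[1 - activeImageIndex].getCLMem());
		kernel->run2D(vidWidth, vidHeight);
		activeImageIndex = 1 - activeImageIndex;
	}

Usage would look like `if(doFlipX) runFilter(openCL.kernel("msa_flipx"));`, with the threshold kernel calling setArg(2, threshLevel) on its kernel before handing it over.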
Example #12
	void update()
	{
		video.update();
		chain.update();
	}