Example #1
//--------------------------------------------------------------
void testApp::setup(){
    vidGrabber.setVerbose(true);
    vidGrabber.initGrabber(640, 480);
    
    colorImg.allocate(vidGrabber.getWidth(), vidGrabber.getHeight());
    greyImage.allocate(vidGrabber.getWidth(), vidGrabber.getHeight());
    greyImageSmall.allocate(120, 90);

    haarFinder.setup("haarcascade_frontalface_alt2.xml");
    
    img.loadImage("stevejobs.png");
    img.setAnchorPercent(0.5, 0.5);
                    
    ofEnableAlphaBlending();
}
Example #2
void ModeFour::setup( ofVideoGrabber &vidGrabber, vector<MovingBackground> &bkgListFour ) {
    
    width = ofGetWindowWidth();
    height = ofGetWindowHeight();
    
    //setting up lines and control points
    int numLines = ofGetWindowWidth() / 10;
    int numBreaks = ofGetWindowHeight() / 8;
    
    for (int i = -15; i < numLines + 15; i++){
        
        ofPolyline line;
        lineList.push_back(line);
        
        for (int j = -15; j < numBreaks+15; j++){
            Curtain c;
            c.setup(ofVec2f (i*10, j*8));
            pList.push_back(c);
            lineList[i+15].addVertex(ofVec2f(c.pos.x,c.pos.y));
        }
    }
    
    background.loadImage("tint.jpg");
    
    //setting up flowsolver
    flowSolver.setup(vidGrabber.getWidth(), vidGrabber.getHeight(), 0.5, 3, 10, 1, 7, 1.5, false, false);
    
    bkgListFour[0].setup("bkg_modeFour_0.png");
    bkgListFour[1].setup("bkg_modeFour_1.png");
    bkgListFour[2].setup("bkg_modeFour_2.png");
    bkgListFour[3].setup("bkg_modeFour_3.png");
    
}
Example #3
//--------------------------------------------------------------
void testApp::update(){

    vidGrabber.grabFrame();
    if(vidGrabber.isFrameNew()) {
        colorImg.setFromPixels(vidGrabber.getPixels(), vidGrabber.getWidth(), vidGrabber.getHeight());
        colorImg.mirror(false, true);

        greyImage = colorImg;
        
        greyImageSmall.scaleIntoMe(greyImage);
        
        haarFinder.findHaarObjects(greyImageSmall);
    }
    
}
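
Note that in Example #1 the detector was set up on the 120x90 greyImageSmall, so the rectangles found here live in that smaller coordinate space. A minimal draw() sketch (an assumption, not part of the original snippets) that scales the ofxCvHaarFinder results back up to the 640x480 view could look like this:

//--------------------------------------------------------------
void testApp::draw(){
    greyImage.draw(0, 0);

    // Detection ran on the 120x90 image, so scale the results back up.
    float scaleX = greyImage.getWidth()  / greyImageSmall.getWidth();
    float scaleY = greyImage.getHeight() / greyImageSmall.getHeight();

    ofNoFill();
    for (unsigned int i = 0; i < haarFinder.blobs.size(); i++) {
        ofRectangle r = haarFinder.blobs[i].boundingRect;
        ofRect(r.x * scaleX, r.y * scaleY, r.width * scaleX, r.height * scaleY);
    }
    ofFill();
}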
Example #4
	FREObject getCameraFrameSize(FREContext ctx, void* funcData, uint32_t argc, FREObject argv[])
	{
		int w = gGrabber.getWidth();
		int h = gGrabber.getHeight();

		FREObject fX,fY;

		FRENewObjectFromInt32(w,&fX);
		FRENewObjectFromInt32(h,&fY);

		FREObject value = argv[0];

		FRESetObjectProperty(value,(const uint8_t*)"w",fX,NULL);                
		FRESetObjectProperty(value,(const uint8_t*)"h",fY,NULL);
		return NULL;
	}
Example #5
//--------------------------------------------------------------
void testApp::setup(){
	ofBackground(50, 50, 50);
	
	
	// dump everything to console
	ofSetLogLevel(OF_LOG_VERBOSE);
	
	
	// disable vsync (to allow >60fps)
	ofSetVerticalSync(false);
	
	
	// init grabber
	videoGrabber.initGrabber(640, 480);	
	vidWidth	= videoGrabber.getWidth();
	vidHeight	= videoGrabber.getHeight();
	

	// allocate temp buffer
	pixels		= new unsigned char[vidWidth * vidHeight * 4];
	

	// init OpenCL from OpenGL context to enable GL-CL data sharing
	openCL.setupFromOpenGL();
	
	
	// create OpenCL textures and related OpenGL textures
	clImage[0].initWithTexture(vidWidth, vidHeight, GL_RGBA);
	clImage[1].initWithTexture(vidWidth, vidHeight, GL_RGBA);
	

	// load and compile OpenCL program
	openCL.loadProgramFromFile("MSAOpenCL/ImageProcessing.cl");
	
	
	// load kernels
	openCL.loadKernel("msa_boxblur");
	openCL.loadKernel("msa_flipx");
	openCL.loadKernel("msa_flipy");
	openCL.loadKernel("msa_greyscale");
	openCL.loadKernel("msa_invert");
	openCL.loadKernel("msa_threshold");
}
Example #6
// Called every frame.
void update() {
    // Update our camera.
    grabber.update();
    
    // If the camera has a new frame to offer us ...
    if (grabber.isFrameNew())
    {
        // Get a reference (denoted by &) to the camera's pixels. Getting a 
        // reference means that we won't have to make a copy of all of the 
        // frame's pixels (since we only need one column anyway). This means our
        // program requires less processing power.
        //
        // const prevents us from accidentally modifying the camera's pixels.
        const ofPixels& cameraPixels = grabber.getPixelsRef();
        
        // Choose a slit location. In this case we'll collect slits from the 
        // column in the middle of the camera feed.
        int slitPositionX = grabber.getWidth() / 2;
        
        // Cycle through each pixel in the selected column and place that pixel
        // at x = xPosition and at the same y position as the original.
        for (int y = 0; y < grabber.getHeight(); y++)
        {
            // Get the pixel as a color at x / y from the grabber.
            ofColor pixelFromGrabber = cameraPixels.getColor(slitPositionX, y);
            
            // Set that pixel color to the x / y position in the output pixels.
            pixels.setColor(xPosition, y, pixelFromGrabber);
        }
        
        // Increment our xPosition so that next update we'll draw a column
        // shifted to the right by one pixel.
        xPosition = xPosition + 1;
        
        // If our xPosition is greater than or equal to the width of the display 
        // pixels, reset our x position to 0.
        if (xPosition >= pixels.getWidth())
        {
            xPosition = 0;   
        }
    }
}
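
The slit-scan example above relies on a few members that are never shown: the grabber itself, an ofPixels buffer named pixels that accumulates the slits, and the running xPosition. The following setup()/draw() pair is only a sketch of how those members could plausibly be declared and initialized; everything beyond the names used above is an assumption.

// Members assumed by the slit-scan example (only "grabber", "pixels" and
// "xPosition" appear in the original; "texture" is added here for drawing).
ofVideoGrabber grabber;
ofPixels pixels;        // accumulated slit-scan image
ofTexture texture;      // used to display the accumulated pixels
int xPosition;          // column where the next slit is written

void setup() {
    grabber.initGrabber(640, 480);
    pixels.allocate(640, 480, OF_PIXELS_RGB);
    pixels.set(0);              // start from a black image
    texture.allocate(pixels);
    xPosition = 0;
}

void draw() {
    texture.loadData(pixels);   // upload the latest slit-scan pixels
    texture.draw(0, 0);
}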
Example #7
void ofxImageTS::pixelate(ofVideoGrabber video, int pixelRatio) {
    // Copy the grabber's current frame into an ofPixels buffer and forward
    // it to the ofPixels overload of pixelate().
    ofPixels copy;
    copy.allocate(video.getWidth(), video.getHeight(), OF_PIXELS_RGB);
    copy = video.getPixels();
    pixelate(copy, pixelRatio);
}
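
The ofPixels overload that this wrapper forwards to is not shown anywhere in these snippets. Purely as an illustration (a hypothetical sketch, not ofxImageTS's actual implementation), a block-sampling pixelate over an ofPixels buffer might look like this:

// Hypothetical sketch only; not the addon's real pixelate(ofPixels&, int).
void pixelate(ofPixels &pixels, int pixelRatio) {
    for (int y = 0; y < pixels.getHeight(); y += pixelRatio) {
        for (int x = 0; x < pixels.getWidth(); x += pixelRatio) {
            // Sample the top-left pixel of each block ...
            ofColor c = pixels.getColor(x, y);
            // ... and spread it over the whole pixelRatio x pixelRatio block.
            for (int by = y; by < y + pixelRatio && by < pixels.getHeight(); by++) {
                for (int bx = x; bx < x + pixelRatio && bx < pixels.getWidth(); bx++) {
                    pixels.setColor(bx, by, c);
                }
            }
        }
    }
}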
Example #8
void TTimbre::update(ofVideoGrabber input){
	originalImage.setFromPixels(input.getPixels(), input.getWidth(),  input.getHeight(), OF_IMAGE_COLOR);
	internalUpdate();
}
void ofxOpticalFlowFarneback::update(ofVideoGrabber& source) {
    update(source.getPixels().getData(), source.getWidth(), source.getHeight(), OF_IMAGE_COLOR); // assume colour image type.
}