void testApp::process(ofFbo & fbo, ofImage & image, string name){
    // Render the camera frame through `shader` into `fbo`, then time the
    // readback of the result into `image` under the sample label `name`.
    fbo.begin();
    ofClear(0);
    shader.begin();
    shader.setUniformTexture("tex", cam.getTextureReference(), 0 );
    shader.setUniform2f("texSize", cam.getWidth(), cam.getHeight());
    shader.setUniform2f("size", fbo.getWidth(), fbo.getHeight());

    // Full-FBO quad; tex coords are in pixels (rectangle-texture convention),
    // so they mirror the vertex positions exactly.
    const float w = fbo.getWidth();
    const float h = fbo.getHeight();
    const float corner[4][2] = { {0, 0}, {w, 0}, {w, h}, {0, h} };
    glBegin(GL_QUADS);
    for(int c = 0; c < 4; c++){
        glTexCoord2f(corner[c][0], corner[c][1]);
        glVertex3f(corner[c][0], corner[c][1], 0);
    }
    glEnd();

    shader.end();
    fbo.end();

    // Only the GPU->CPU transfer is timed; the render above is excluded.
    TIME_SAMPLE_START(name);
    fbo.readToPixels(image.getPixelsRef());
    //image.update();
    TIME_SAMPLE_STOP(name);
    //image.draw(0, 0);
}
void update() { for(int i = 0; i < pulses.size(); i++) { pulses[i].update(people); } fbo.begin(); ofPushStyle(); ofSetLineWidth(30); ofSetColor(0, 10); ofFill(); ofRect(0, 0, fbo.getWidth(), fbo.getHeight()); ofSetColor(255); ofTranslate(ofGetWidth() / 2, ofGetHeight() / 2); float saturation = ofMap(pulses.size(), 1, 10, 0, 255, true); for(int i = 0; i < pulses.size(); i++) { ofPushMatrix(); ofRotate(pulses[i].getAngle()); ofSetColor(ofColor::fromHsb(pulses[i].getHue(), saturation, 255)); ofLine(0, 0, ofGetWidth() / 2, 0); ofPopMatrix(); } ofPopStyle(); fbo.end(); fbo.readToPixels(fboPixels); ledRing.update(fboPixels); float presence = 0; for(int i = 0; i < people.size(); i++) { presence += people[i].getPresence(); } presence /= people.size(); midi.sendControlChange(2, 1, presence); }
void saveCapture(){ ofPixels pixels; buffer.readToPixels(pixels); ofImage screenshot(pixels); //screenshot.grabScreen(0,0,ofGetWidth(),ofGetHeight()); // TODO: either put the saveImage action into a separated thread, change the file format for something faster or find some sort of "save a little on each frame method". screenshot.saveImage("screenshot-"+ofToString(screenshotCount++)+".png"); }
void update() {
#ifdef USE_AUDIO
    // Render the scene once into a small FBO sampled at the speaker
    // positions, average each speaker's pixels on the CPU, and broadcast
    // the per-speaker brightness plus scene-timing events over OSC.

    //Speaker sampling code
    speakerFbo.begin();
    renderScene(shader, speakerXyzMap, speakerConfidenceMap);
    speakerFbo.end();

    //Read back the fbo, and average it on the CPU
    speakerFbo.readToPixels(speakerPixels);
    speakerPixels.setImageType(OF_IMAGE_GRAYSCALE);
    ofxOscMessage brightnessMsg;
    brightnessMsg.setAddress("/audio/brightness");
    // NOTE(review): `pix` walks n_speakers * n_samples consecutive values —
    // assumes speakerPixels is float-valued and laid out as n_samples
    // contiguous samples per speaker; TODO confirm against the map layout.
    float* pix = speakerPixels.getData();
    for(int i = 0; i < n_speakers; i++){
        float avg = 0;
        for(int j = 0; j < n_samples; j++){
            avg += *pix++;
        }
        avg /= n_samples;
        // One float argument per speaker, in speaker-index order.
        brightnessMsg.addFloatArg(avg);
    }
    oscSender.sendMessage(brightnessMsg);

    float elapsedTime = ofGetElapsedTimef();
    // The stage math below must stay in sync with the shader's version.
    // copied from shader --- 8< ---
    float t = elapsedTime / 30.; // duration of each stage
    float stage = floor(t); // index of current stage
    float i = t - stage; // progress in current stage
    // copied from shader --- 8< ---

    // Emit a scene-change event once per stage transition. Exact float
    // comparison is safe here because both values come from floor().
    if(stage != previousStage) {
        ofxOscMessage msg;
        msg.setAddress("/audio/scene_change_event");
        // 0 for the first stage, 2 otherwise — presumably a scene id for
        // the audio side; verify against the OSC receiver.
        msg.addIntArg(stage == 0 ? 0 : 2);
        oscSender.sendMessage(msg);
    }
    previousStage = stage;

    // During stage 0, continuously publish the lighthouse beam angle,
    // normalized to [0, 1) per revolution.
    if(stage == 0) {
        float lighthouseAngle = ofGetElapsedTimef() / TWO_PI;
        lighthouseAngle += 0; // set offset here
        ofxOscMessage msg;
        msg.setAddress("/audio/lighthouse_angle");
        msg.addFloatArg(fmodf(lighthouseAngle, 1));
        oscSender.sendMessage(msg);
    }
#endif
}
//-------------------------------------------------------------- void testApp::update() { int c = ofRandom(255); fbo.begin(); ofClear(c, c, c); fbo.end(); TIME_SAMPLE_START("PBO"); if (mode == 0) { reader.readToPixels(fbo, pix); } TIME_SAMPLE_STOP("PBO"); TIME_SAMPLE_START("readToPixels"); if (mode == 1) { fbo.readToPixels(pix); } TIME_SAMPLE_STOP("readToPixels"); }
//-------------------------------------------------------------- void testApp::update(){ // Get infrarred image from kinect and transform to OpenCV image recordContext.update(); recordImage.update(); grayImage.setFromPixels(recordImage.ir_pixels, W, H); // Save background if(bBackground){ saveBackground(); bBackground = false; } // ROI mask selection drawRoiMask(grayImage); roi.x = MIN(roiMask[0].x, roiMask[3].x); roi.y = MIN(roiMask[0].y, roiMask[1].y); roiW = MAX(roiMask[1].x, roiMask[2].x) - roi.x; roiH = MAX(roiMask[3].y, roiMask[2].y) - roi.y; grayImage.setROI(roi.x, roi.y, roiW, roiH); backImg.setROI(roi.x, roi.y, roiW, roiH); grayAcc.setROI(roi.x, roi.y, roiW, roiH); // Opencv preprocessing t0 = ofGetElapsedTimeMillis(); if(bProcess){ // grayImage.absDiff(grayImage, backImg); grayImage.brightnessContrast(brightness, contrast); cvAdaptiveThreshold(grayImage.getCvImage(), grayImage.getCvImage(), 255); grayImage.blur(); grayImage.erode(); grayImage.dilate(); grayImage.addWeighted(grayAcc, 0.1); grayAcc = grayImage; grayImage.canny(50, 150); Mat dst = grayImage.getCvImage(); findContours(dst, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE); for (size_t i = 0; i < contours.size(); i++){ CvPoint* pts = new CvPoint[contours[i].size()]; for(int j = 0; j < contours[i].size(); j ++){ pts[j].x = contours[i][j].x; pts[j].y = contours[i][j].y; } int nPts = contours[i].size(); cvPolyLine( grayImage.getCvImage(), &pts, &nPts, 1, true, CV_RGB(255, 255, 255)); delete[] pts; } grayImage.dilate(); grayImage.erode(); } t1 = ofGetElapsedTimeMillis() - t0; //--- // Send image to MadMapper t0 = ofGetElapsedTimeMillis(); tex.allocate(grayImage.getWidth(), grayImage.getHeight(), GL_LUMINANCE); tex.loadData(grayImage.getPixels(), grayImage.getWidth(), grayImage.getHeight(), GL_LUMINANCE); individualTextureSyphonServer.publishTexture(&tex); //--- // Get image from MadMapper if(!fbo.isAllocated() || !(mClient.getWidth() == fbo.getWidth() && mClient.getHeight() == fbo.getHeight())) 
fbo.allocate(mClient.getWidth(), mClient.getHeight()); fbo.begin(); mClient.draw(0, 0); fbo.end(); fbo.readToPixels(pix); t2 = ofGetElapsedTimeMillis() - t0; //--- // Update my system L.update(mouseX, mouseY); }