Example #1
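A GLUT display callback driving KFusion on live Kinect input: it uploads the current depth frame, tracks the camera with ICP, periodically integrates the frame into the TSDF volume, raycasts the updated model, and tiles the rendered views into one window.
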
void display(void){
    const uint2 imageSize = kfusion.configuration.inputSize; // unused here: the raster positions below hardcode 320x240 tiles
    static bool integrate = true;

    glClear( GL_COLOR_BUFFER_BIT );
    const double startFrame = Stats.start();
    const double startProcessing = Stats.sample("kinect");

    //    kfusion.setKinectDeviceDepth(depthImage[GetKinectFrame()].getDeviceImage());

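    // Upload the most recent depth frame from the RGB-D device to the GPU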
    kfusion.setKinectDeviceDepth(depthImage[rgbdDevice->currentDepthBufferIndex()].getDeviceImage());


    Stats.sample("raw to cooked");

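    // ICP camera tracking; the result decides whether this frame gets integrated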
    integrate = kfusion.Track();
    Stats.sample("track");

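    // Fuse the depth frame into the TSDF volume every integration_rate frames (or while resetting), then raycast the updated surface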
    if((should_integrate && integrate && ((counter % integration_rate) == 0)) || reset){
        kfusion.Integrate();
        kfusion.Raycast();
        Stats.sample("integrate");
        if(counter > 2) // use the first two frames to initialize
            reset = false;
    }

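    // Render the small debug views: shaded input, shaded model, and per-pixel tracking status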
    renderLight( lightScene.getDeviceImage(), kfusion.inputVertex[0], kfusion.inputNormal[0], light, ambient );
    renderLight( lightModel.getDeviceImage(), kfusion.vertex, kfusion.normal, light, ambient);
    renderTrackResult(trackModel.getDeviceImage(), kfusion.reduction);
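    // Re-render the expensive free-look view only every fourth frame, or when explicitly requested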
    static int count = 4;
    if(count > 3 || redraw_big_view){
        renderInput( pos, normals, dep, kfusion.integration,
                     toMatrix4( trans * rot * preTrans ) * getInverseCameraMatrix(kfusion.configuration.camera * 2),
                     kfusion.configuration.nearPlane, kfusion.configuration.farPlane,
                     kfusion.configuration.stepSize(), 0.75 * kfusion.configuration.mu);
        count = 0;
        redraw_big_view = false;
    } else
        count++;
    if(render_texture)
        renderTexture( texModel.getDeviceImage(), pos, normals, rgbImage.getDeviceImage(),
                       getCameraMatrix(2 * kfusion.configuration.camera) * inverse(kfusion.pose), light);
    else
        renderLight( texModel.getDeviceImage(), pos, normals, light, ambient);
    cudaDeviceSynchronize();

    Stats.sample("render");

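    // Blit everything into a 3x2 tile layout; the negative Y pixel zoom flips the images into screen orientation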
    glClear(GL_COLOR_BUFFER_BIT);
    glRasterPos2i(0, 0);
    glDrawPixels(lightScene); // left top
    glRasterPos2i(0, 240);
    glPixelZoom(0.5, -0.5); // draw rgbImage at half scale so it fits a 320x240 tile
    glDrawPixels(rgbImage); // left bottom
    glPixelZoom(1, -1);
    glRasterPos2i(320, 0);
    glDrawPixels(lightModel); // middle top
    glRasterPos2i(320, 240);
    glDrawPixels(trackModel); // middle bottom
    glRasterPos2i(640, 0);
    glDrawPixels(texModel); // right
    const double endProcessing = Stats.sample("draw");

    Stats.sample("total", endProcessing - startFrame, PerfStats::TIME);
    Stats.sample("total_proc", endProcessing - startProcessing, PerfStats::TIME);

    if(printCUDAError())
        exit(1);

    ++counter;

    if(counter % 50 == 0){
        Stats.print();
        Stats.reset();
        std::cout << std::endl;
    }

    glutSwapBuffers();
}
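
For context, a minimal sketch of how such a callback is typically wired into a GLUT main loop. The idle() handler and the 960x480 window size (a 3x2 grid of 320x240 tiles) are assumptions for illustration, not taken from the listing:

#include <GL/glut.h>

void idle(void){ glutPostRedisplay(); } // keep re-triggering display() every frame

int main(int argc, char ** argv){
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE); // double-buffered, matching glutSwapBuffers() above
    glutInitWindowSize(960, 480);                // assumed: fits the tile layout used in display()
    glutCreateWindow("kfusion");
    glutDisplayFunc(display);
    glutIdleFunc(idle);
    glutMainLoop();
    return 0;
}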
Example #2
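A variant that exercises KFusion against a synthetic ground truth: each frame it raycasts a reference volume to generate the input depth, feeds that depth back into the pipeline, and draws the ground truth, the tracking state, and the reconstruction as three rows of views.
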
void display(void) {

    static bool integrate = true;

    const uint2 imageSize = kfusion.configuration.inputSize;

    const double start = Stats.start();
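    // Raycast the ground-truth reference volume to synthesize this frame's input vertex, normal, and depth maps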
    renderInput(vertex.getDeviceImage(), normal.getDeviceImage(), depth.getDeviceImage(), reference,
                toMatrix4( trans * rot * preTrans ) * getInverseCameraMatrix(kfusion.configuration.camera),
                kfusion.configuration.nearPlane, kfusion.configuration.farPlane,
                kfusion.configuration.stepSize(), 0.01 );
    cudaDeviceSynchronize();
    Stats.sample("ground raycast");
    Stats.sample("ground copy");

    glRasterPos2i(0,0);
    glDrawPixels(vertex);
    glRasterPos2i(imageSize.x, 0);
    glDrawPixels(normal);
    glRasterPos2i(imageSize.x * 2, 0);
    glDrawPixels(depth);
    Stats.sample("ground draw");

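    // Feed the synthesized depth map into KFusion as if it came from a real sensor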
    kfusion.setDepth( depth.getDeviceImage() );
    cudaDeviceSynchronize();
    const double track_start = Stats.sample("process depth");

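    // Skip tracking until the volume has been seeded by the first integration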
    if(counter > 1){
        integrate = kfusion.Track();
        cudaDeviceSynchronize();
        Stats.sample("track");
    }

    renderTrackResult(rgb.getDeviceImage(), kfusion.reduction);
    cudaDeviceSynchronize();
    Stats.sample("track render");
    Stats.sample("track copy");

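    // Fuse the frame into the volume and raycast it again so the next Track() has a fresh model view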
    if(integrate){
        kfusion.Integrate();
        cudaDeviceSynchronize();
        Stats.sample("integration");
        kfusion.Raycast();
        cudaDeviceSynchronize();
        Stats.sample("raycast");
        vertex = kfusion.vertex;
        normal = kfusion.normal;
        Stats.sample("raycast get");
    }

    glRasterPos2i(0,imageSize.y * 1);
    glDrawPixels(vertex);
    glRasterPos2i(imageSize.x, imageSize.y * 1);
    glDrawPixels(normal);
    glRasterPos2i(2 * imageSize.x, imageSize.y * 1);
    glDrawPixels(rgb);
    Stats.sample("track draw");

    Stats.sample("total track", Stats.get_time() - track_start, PerfStats::TIME);

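    // Raycast the reconstruction from the estimated camera pose for the bottom row of views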
    renderInput(vertex.getDeviceImage(), normal.getDeviceImage(), depth.getDeviceImage(), kfusion.integration,
                kfusion.pose * getInverseCameraMatrix(kfusion.configuration.camera),
                kfusion.configuration.nearPlane, kfusion.configuration.farPlane,
                kfusion.configuration.stepSize(), 0.7 * kfusion.configuration.mu );
    cudaDeviceSynchronize();
    Stats.sample("view raycast");
    Stats.sample("view copy");

    glRasterPos2i(0,imageSize.y * 2);
    glDrawPixels(vertex);
    glRasterPos2i(imageSize.x, imageSize.y * 2);
    glDrawPixels(normal);
    glRasterPos2i(imageSize.x * 2, imageSize.y * 2);
    glDrawPixels(depth);
    Stats.sample("view draw");

    Stats.sample("events");
    Stats.sample("total all", Stats.get_time() - start, PerfStats::TIME);

    if(counter % 30 == 0){
        Stats.print();
        Stats.reset();
        std::cout << std::endl;
    }

    ++counter;

    printCUDAError(); // report CUDA errors; unlike Example #1, keep running instead of exiting

    glutSwapBuffers();
}
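
Stripped of timing and drawing, both callbacks reduce to the same per-frame pipeline. A condensed sketch using the identifiers from the listings above (processFrame itself is a made-up name, not a third example):

void processFrame(){
    kfusion.setDepth(depth.getDeviceImage()); // Example #1 uses setKinectDeviceDepth() for live input
    const bool tracked = kfusion.Track();     // ICP alignment against the last raycast of the model
    if(tracked){
        kfusion.Integrate();                  // fuse the depth map into the TSDF volume
        kfusion.Raycast();                    // extract vertex/normal maps for the next Track()
    }
}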