Example #1
void RecLoopRender::draw(){
    VideoFrame * bufferFrame = buffer->getNextVideoFrame();
    VideoFrame * liveFrame   = live->getNextVideoFrame();
    if(bufferFrame!=NULL && liveFrame!=NULL){
        // Lazily allocate the grab-back image using the live frame's dimensions.
        if(!imageAllocated){
            image.allocate(liveFrame->getWidth(),liveFrame->getHeight(),OF_IMAGE_COLOR);
            imageAllocated=true;
        }

        // Tint the output and blend the buffered frame over the live frame
        // using a min or max blend equation (EXT_blend_minmax).
        ofEnableAlphaBlending();
        ofSetColor(tintR,tintG,tintB,alpha);
        if(minmaxBlend)
            glBlendEquationEXT(GL_MAX);
        else
            glBlendEquationEXT(GL_MIN);

        liveFrame->getTextureRef().draw(0,0);

        if(!stopped){
            // Draw the buffered frame, grab the composited result from screen,
            // and feed it back into the buffer frame's texture (feedback loop).
            bufferFrame->getTextureRef().draw(0,0);
            image.grabScreen(0,0,liveFrame->getWidth(),liveFrame->getHeight());
            bufferFrame->getTextureRef().loadData(image.getPixelsRef());
        }

        liveFrame->release();
        bufferFrame->release();

        // Restore the default blending state.
        ofDisableAlphaBlending();
        glBlendEquationEXT(GL_FUNC_ADD);

    }
}
Example #2
void Window::render(const VideoFrame& frame)
{
    LogDebug("Rendering frame " << frame.getId());
    glClear(GL_COLOR_BUFFER_BIT);

    glEnableVertexAttribArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
    glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, (void*) 0);

    glEnableVertexAttribArray(1);
    glBindBuffer(GL_ARRAY_BUFFER, uvBuffer);
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, (void*) 0);

    // TODO: consider linesize padding here
    // TODO: use glTexSubImage2D for more performance
    glTexImage2D(GL_TEXTURE_2D,
                 0,
                 GL_RED,
                 frame.getWidth(),
                 frame.getHeight(),
                 0,
                 GL_RED,
                 GL_UNSIGNED_BYTE,
                 frame.getLumaData());

    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, (void*) 0);

    glDisableVertexAttribArray(1);
    glDisableVertexAttribArray(0);
    glfwSwapBuffers(glfwWindow);
}
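The two TODOs in this example concern row padding and per-frame upload cost. Below is a minimal sketch of how they might be handled, assuming the texture was already allocated once with glTexImage2D and that the frame exposes its row stride through a getLinesize() accessor; both the texture handle lumaTexture and getLinesize() are assumptions, not part of the original code.

// Sketch only: lumaTexture and frame.getLinesize() are hypothetical.
glBindTexture(GL_TEXTURE_2D, lumaTexture);
// Account for padded rows: for a 1-byte GL_RED format, the byte stride equals
// the pixel count GL expects for GL_UNPACK_ROW_LENGTH.
glPixelStorei(GL_UNPACK_ROW_LENGTH, frame.getLinesize());
// Re-upload into the existing texture storage instead of reallocating it each frame.
glTexSubImage2D(GL_TEXTURE_2D, 0,
                0, 0,
                frame.getWidth(), frame.getHeight(),
                GL_RED, GL_UNSIGNED_BYTE,
                frame.getLumaData());
// Restore the default unpack state.
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);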
Example #3
bool VideoEncoderX264or5::doProcessFrame(Frame *org, Frame *dst)
{
    if (!(org && dst)) {
        utils::errorMsg("Error encoding video frame: org or dst are NULL");
        return false;
    }

    VideoFrame* rawFrame = dynamic_cast<VideoFrame*> (org);
    VideoFrame* codedFrame = dynamic_cast<VideoFrame*> (dst);

    if (!rawFrame || !codedFrame) {
        utils::errorMsg("Error encoding video frame: org and dst MUST be VideoFrame");
        return false;
    }

    if (!reconfigure(rawFrame, codedFrame)) {
        utils::errorMsg("Error encoding video frame: reconfigure failed");
        return false;
    }

    if (!fill_x264or5_picture(rawFrame)){
        utils::errorMsg("Could not fill x264_picture_t from frame");
        return false;
    }

    if (!encodeFrame(codedFrame)) {
        utils::errorMsg("Could not encode video frame");
        return false;
    }

    codedFrame->setSize(rawFrame->getWidth(), rawFrame->getHeight());

    return true;
}
Example #4
void SHMSink::render_frame(VideoFrame& src)
{
    VideoFrame dst;
    VideoScaler scaler;

    const int width = src.getWidth();
    const int height = src.getHeight();
    const int format = VIDEO_PIXFMT_BGRA;
    size_t bytes = dst.getSize(width, height, format);

    shm_lock();

    if (!resize_area(sizeof(SHMHeader) + bytes)) {
        ERROR("Could not resize area");
        return;
    }

    dst.setDestination(shm_area_->data, width, height, format);
    scaler.scale(src, dst);

#ifdef DEBUG_FPS
    const std::chrono::time_point<std::chrono::system_clock> currentTime = std::chrono::system_clock::now();
    const std::chrono::duration<double> seconds = currentTime - lastFrameDebug_;
    frameCount_++;
    if (seconds.count() > 1) {
        DEBUG("%s: FPS %f", shm_name_.c_str(), frameCount_ / seconds.count());
        frameCount_ = 0;
        lastFrameDebug_ = currentTime;
    }
#endif

    shm_area_->buffer_size = bytes;
    shm_area_->buffer_gen++;
    sem_post(&shm_area_->notification);
    shm_unlock();
}
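The fields touched at the end (buffer_size, buffer_gen and the notification semaphore) outline the writer side of the shared-memory handoff. A minimal reader sketch under that assumption follows; the SHMHeader layout shown is inferred from the fields used above, and consume_frame() is a hypothetical callback, so treat this as an illustration rather than the library's actual consumer.

#include <semaphore.h>
#include <cstddef>

// Assumed minimal layout, inferred from the fields the writer touches above.
struct SHMHeader {
    sem_t    notification;   // posted by the writer after each frame
    unsigned buffer_gen;     // incremented once per frame
    size_t   buffer_size;    // bytes of pixel data currently valid
    char     data[];         // BGRA pixels written by the scaler
};

void read_frames(SHMHeader *shm)
{
    unsigned last_gen = shm->buffer_gen;
    for (;;) {
        // Block until the writer posts a new frame.
        sem_wait(&shm->notification);
        if (shm->buffer_gen == last_gen)
            continue;                        // nothing new, spurious wakeup
        last_gen = shm->buffer_gen;
        // consume_frame() is hypothetical: hand the BGRA buffer to the caller.
        consume_frame(shm->data, shm->buffer_size);
    }
}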