Example #1
void VideoMaterial::setCurrentFrame(const VideoFrame &frame)
{
    DPTR_D(VideoMaterial);
    d.update_texure = true;
    d.bpp = frame.format().bitsPerPixel(0);
    d.width = frame.width();
    d.height = frame.height();
    const VideoFormat fmt(frame.format());
    // http://forum.doom9.org/archive/index.php/t-160211.html
    ColorTransform::ColorSpace cs = ColorTransform::RGB;
    if (fmt.isRGB()) {
        if (fmt.isPlanar())
            cs = ColorTransform::GBR;
    } else {
        if (frame.width() >= 1280 || frame.height() > 576) // HD-size thresholds, values from mpv
            cs = ColorTransform::BT709;
        else
            cs = ColorTransform::BT601;
    }
    d.colorTransform.setInputColorSpace(cs);
    d.frame = frame;
    if (fmt != d.video_format) {
        qDebug("pixel format changed: %s => %s", qPrintable(d.video_format.name()), qPrintable(fmt.name()));
        d.video_format = fmt;
    }
}
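
The BT.601/BT.709 choice above is only a resolution heuristic. As a hedged illustration, here is the same decision factored into a standalone helper; guessColorSpace is a hypothetical name (not part of VideoMaterial), and the header path is assumed.

#include "ColorTransform.h" // assumed path of the internal QtAV header

// Hypothetical helper, illustration only: mirrors the heuristic in
// VideoMaterial::setCurrentFrame(). HD-sized content (>=1280 wide or >576
// tall, thresholds borrowed from mpv) is treated as BT.709, SD as BT.601.
static ColorTransform::ColorSpace guessColorSpace(const VideoFormat& fmt, int width, int height)
{
    if (fmt.isRGB())
        return fmt.isPlanar() ? ColorTransform::GBR : ColorTransform::RGB;
    if (width >= 1280 || height > 576)
        return ColorTransform::BT709;
    return ColorTransform::BT601;
}
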
Example #2
VideoFrame VideoFrame::to(const VideoFormat &fmt, const QSize& dstSize, const QRectF& roi) const
{
    if (!isValid() || !constBits(0)) { // hw surface: map to host memory. Only packed rgb formats are supported for now
        Q_D(const VideoFrame);
        const QVariant v = d->metadata.value(QStringLiteral("surface_interop"));
        if (!v.isValid())
            return VideoFrame();
        VideoSurfaceInteropPtr si = v.value<VideoSurfaceInteropPtr>();
        if (!si)
            return VideoFrame();
        VideoFrame f;
        f.setDisplayAspectRatio(displayAspectRatio());
        f.setTimestamp(timestamp());
        if (si->map(HostMemorySurface, fmt, &f)) {
            if ((!dstSize.isValid() || dstSize == QSize(width(), height())) && (!roi.isValid() || roi == QRectF(0, 0, width(), height()))) // roi is not supported now
                return f;
            return f.to(fmt, dstSize, roi);
        }
        return VideoFrame();
    }
    const int w = dstSize.width() > 0 ? dstSize.width() : width();
    const int h = dstSize.height() > 0 ? dstSize.height() : height();
    if (fmt.pixelFormatFFmpeg() == pixelFormatFFmpeg()
            && w == width() && h == height()
            // TODO: roi check.
            )
        return *this;
    Q_D(const VideoFrame);
    ImageConverterSWS conv;
    conv.setInFormat(pixelFormatFFmpeg());
    conv.setOutFormat(fmt.pixelFormatFFmpeg());
    conv.setInSize(width(), height());
    conv.setOutSize(w, h);
    conv.setInRange(colorRange());
    if (!conv.convert(d->planes.constData(), d->line_sizes.constData())) {
        qWarning() << "VideoFrame::to error: " << format() << "=>" << fmt;
        return VideoFrame();
    }
    VideoFrame f(w, h, fmt, conv.outData());
    f.setBits(conv.outPlanes());
    f.setBytesPerLine(conv.outLineSizes());
    if (fmt.isRGB()) {
        f.setColorSpace(fmt.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
    } else {
        f.setColorSpace(ColorSpace_Unknown);
    }
    // TODO: color range
    f.setTimestamp(timestamp());
    f.setDisplayAspectRatio(displayAspectRatio());
    f.d_ptr->metadata = d->metadata; // need metadata?
    return f;
}
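
For context, a hedged usage sketch of the conversion path above: scaling a decoded frame to half size and converting it to packed RGB32. The helper name is made up, and the ROI argument is left at its default because the implementation above does not support it yet.

#include <QtAV/VideoFrame.h>
#include <QtAV/VideoFormat.h>

// Illustration only: convert an arbitrary decoded frame to RGB32 at half size.
// Returns an invalid VideoFrame if the input is invalid or conversion fails,
// matching the error behavior of VideoFrame::to() above.
static VideoFrame toHalfSizeRGB32(const VideoFrame& frame)
{
    if (!frame.isValid())
        return VideoFrame();
    const QSize half(frame.width() / 2, frame.height() / 2);
    return frame.to(VideoFormat(VideoFormat::Format_RGB32), half, QRectF());
}
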
Example #3
static QMatrix4x4 channelMap(const VideoFormat& fmt)
{
    if (fmt.isPlanar()) // planar formats need no swizzle; the mapping below is only for packed formats
        return QMatrix4x4();
    switch (fmt.pixelFormat()) {
    case VideoFormat::Format_UYVY:
        return QMatrix4x4(0.0f, 0.5f, 0.0f, 0.5f,
                          1.0f, 0.0f, 0.0f, 0.0f,
                          0.0f, 0.0f, 1.0f, 0.0f,
                          0.0f, 0.0f, 0.0f, 1.0f);
    case VideoFormat::Format_YUYV:
        return QMatrix4x4(0.5f, 0.0f, 0.5f, 0.0f,
                          0.0f, 1.0f, 0.0f, 0.0f,
                          0.0f, 0.0f, 0.0f, 1.0f,
                          0.0f, 0.0f, 0.0f, 1.0f);
    case VideoFormat::Format_VYUY:
        return QMatrix4x4(0.0f, 0.5f, 0.0f, 0.5f,
                          0.0f, 0.0f, 1.0f, 0.0f,
                          1.0f, 0.0f, 0.0f, 0.0f,
                          0.0f, 0.0f, 0.0f, 1.0f);
    case VideoFormat::Format_YVYU:
        return QMatrix4x4(0.5f, 0.0f, 0.5f, 0.0f,
                          0.0f, 0.0f, 0.0f, 1.0f,
                          0.0f, 1.0f, 0.0f, 0.0f,
                          0.0f, 0.0f, 0.0f, 1.0f);
    case VideoFormat::Format_VYU:
        return QMatrix4x4(0.0f, 1.0f, 0.0f, 0.0f,
                          0.0f, 0.0f, 1.0f, 0.0f,
                          1.0f, 0.0f, 0.0f, 0.0f,
                          0.0f, 0.0f, 0.0f, 1.0f);
    default:
        break;
    }

    const quint8 *channels = NULL;//{ 0, 1, 2, 3};
    for (int i = 0; gl_channel_maps[i].pixfmt != VideoFormat::Format_Invalid; ++i) {
        if (gl_channel_maps[i].pixfmt == fmt.pixelFormat()) {
            channels = gl_channel_maps[i].channels;
            break;
        }
    }
    QMatrix4x4 m;
    if (!channels)
        return m;
    m.fill(0);
    for (int i = 0; i < 4; ++i) {
        m(i, channels[i]) = 1;
    }
    qDebug() << m;
    return m;
}
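
To make the swizzle concrete, a small hedged sketch of how the returned matrix is meant to be consumed: it reorders (and, for the packed 4:2:2 formats, averages) the components of a sampled texel before the YUV-to-RGB color matrix is applied. The texel values are arbitrary, and demoChannelMap is not part of the renderer.

#include <QDebug>
#include <QMatrix4x4>
#include <QVector4D>

// Illustration only: apply the channel map to a made-up texel.
static void demoChannelMap(const VideoFormat& fmt)
{
    const QMatrix4x4 m = channelMap(fmt);
    const QVector4D texel(0.1f, 0.5f, 0.9f, 1.0f); // arbitrary sample values
    const QVector4D mapped = m * texel;            // components reordered/mixed
    qDebug() << "channel map for" << fmt.name() << ":" << m << "mapped texel:" << mapped;
}
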
Example #4
bool GLWidgetRendererPrivate::initTextures(const VideoFormat &fmt)
{
    // isSupported(pixfmt)
    if (!fmt.isValid())
        return false;
    video_format.setPixelFormatFFmpeg(fmt.pixelFormatFFmpeg());

    //http://www.berkelium.com/OpenGL/GDC99/internalformat.html
    //NV12: UV is 1 plane. 16 bits as a unit. GL_LUMINANCE4, 8, 16, ... 32?
    //GL_LUMINANCE, GL_LUMINANCE_ALPHA are deprecated in GL3, removed in GL3.1
    //replaced by GL_RED, GL_RG, GL_RGB, GL_RGBA? for 1, 2, 3, 4 channel images
    //http://www.gamedev.net/topic/634850-do-luminance-textures-still-exist-to-opengl/
    //https://github.com/kivy/kivy/issues/1738: GL_LUMINANCE does work on a Galaxy Tab 2. LUMINANCE_ALPHA very slow on Linux
    //ALPHA: vec4(1,1,1,A), LUMINANCE: (L,L,L,1), LUMINANCE_ALPHA: (L,L,L,A)
    /*
     * To support both planar and packed use GL_ALPHA and in shader use r,g,a like xbmc does.
     * or use Swizzle_mask to layout the channels: http://www.opengl.org/wiki/Texture#Swizzle_mask
     * GL ES2 support: GL_RGB, GL_RGBA, GL_LUMINANCE, GL_LUMINANCE_ALPHA, GL_ALPHA
     * http://stackoverflow.com/questions/18688057/which-opengl-es-2-0-texture-formats-are-color-depth-or-stencil-renderable
     */
    internal_format = QVector<GLint>(fmt.planeCount(), FMT_INTERNAL);
    data_format = QVector<GLenum>(fmt.planeCount(), FMT);
    data_type = QVector<GLenum>(fmt.planeCount(), GL_UNSIGNED_BYTE);
    if (fmt.isPlanar()) {
        /*!
         * In GLES, internal_format must equal data_format. GL_LUMINANCE_ALPHA is
         * 2 bytes per texel, so NV12 uses GL_LUMINANCE_ALPHA for its UV plane,
         * while YV12 uses GL_LUMINANCE/GL_ALPHA for its single-byte planes.
         */
        qDebug("///////////bpp %d", fmt.bytesPerPixel());

        internal_format[0] = data_format[0] = GL_LUMINANCE; //or GL_RED for GL
        if (fmt.planeCount() == 2) {
            internal_format[1] = data_format[1] = GL_LUMINANCE_ALPHA;
        } else {
            if (fmt.bytesPerPixel(1) == 2) {
                // read 16 bits and compute the real luminance in shader
                internal_format[0] = data_format[0] = GL_LUMINANCE_ALPHA;
                internal_format[1] = data_format[1] = GL_LUMINANCE_ALPHA; //vec4(L,L,L,A)
                internal_format[2] = data_format[2] = GL_LUMINANCE_ALPHA;
            } else {
                internal_format[1] = data_format[1] = GL_LUMINANCE; //vec4(L,L,L,1)
                internal_format[2] = data_format[2] = GL_ALPHA;//GL_ALPHA;
            }
        }
        for (int i = 0; i < internal_format.size(); ++i) {
            // xbmc use bpp not bpp(plane)
            //internal_format[i] = GetGLInternalFormat(data_format[i], fmt.bytesPerPixel(i));
            //data_format[i] = internal_format[i];
        }
    } else {
        //glPixelStorei(GL_UNPACK_ALIGNMENT, fmt.bytesPerPixel());
        // TODO: if no alpha, data_fmt is not GL_BGRA. align at every upload?
    }
    for (int i = 0; i < fmt.planeCount(); ++i) {
        //qDebug("format: %#x GL_LUMINANCE_ALPHA=%#x", data_format[i], GL_LUMINANCE_ALPHA);
        if (fmt.bytesPerPixel(i) == 2 && fmt.planeCount() == 3) {
            //data_type[i] = GL_UNSIGNED_SHORT;
        }
        int bpp_gl = bytesOfGLFormat(data_format[i], data_type[i]);
        int pad = qCeil((qreal)(texture_size[i].width() - effective_tex_width[i])/(qreal)bpp_gl);
        texture_size[i].setWidth(qCeil((qreal)texture_size[i].width()/(qreal)bpp_gl));
        effective_tex_width[i] /= bpp_gl; //fmt.bytesPerPixel(i);
        //effective_tex_width_ratio =
        qDebug("texture width: %d - %d = pad: %d. bpp(gl): %d", texture_size[i].width(), effective_tex_width[i], pad, bpp_gl);
    }

    /*
     * There are 2 fragment shaders: rgb and yuv.
     * Packed rgb needs only 1 texture; planar rgb is handled like yuv.
     * To support planar and packed yuv, as well as mixed yuv (NV12), we give a texture
     * sampler to each channel. For packed formats, every channel samples the same
     * texture. For planar formats, channels packed into one plane share a texture sample.
     * But the number of actual textures we upload is the plane count,
     * which means the number of texture ids equals the plane count.
     */
    if (textures.size() != fmt.planeCount()) {
        glDeleteTextures(textures.size(), textures.data());
        qDebug("delete %d textures", textures.size());
        textures.clear();
        textures.resize(fmt.planeCount());
        glGenTextures(textures.size(), textures.data());
    }

    if (!hasGLSL) {
        initTexture(textures[0], internal_format[0], data_format[0], data_type[0], texture_size[0].width(), texture_size[0].height());
        // more than 1?
        qWarning("Does not support GLSL!");
        return false;
    }
    qDebug("init textures...");
    initTexture(textures[0], internal_format[0], data_format[0], data_type[0], texture_size[0].width(), texture_size[0].height());
    for (int i = 1; i < textures.size(); ++i) {
        initTexture(textures[i], internal_format[i], data_format[i], data_type[i], texture_size[i].width(), texture_size[i].height());
    }
    return true;
}
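
The per-plane width math above divides by bytesOfGLFormat(), which is defined elsewhere in the renderer. A hedged, simplified sketch of what such a lookup computes, covering only the GL_UNSIGNED_BYTE formats used in this function (not the real helper's full table):

#include <qopengl.h> // assumed; any header providing the GLenum constants works

// Simplified sketch of a bytesOfGLFormat()-style lookup: bytes per texel for
// the format/type pairs used in initTextures(). The real helper handles more
// combinations (e.g. GL_UNSIGNED_SHORT types).
static int bytesOfGLFormatSketch(GLenum data_format, GLenum data_type)
{
    if (data_type != GL_UNSIGNED_BYTE)
        return 0; // other data types omitted in this sketch
    switch (data_format) {
    case GL_ALPHA:
    case GL_LUMINANCE:       return 1; // 1 byte per texel
    case GL_LUMINANCE_ALPHA: return 2; // 2 bytes per texel
    case GL_RGB:             return 3;
    case GL_RGBA:            return 4;
    default:                 return 0;
    }
}
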
Example #5
bool GLWidgetRendererPrivate::prepareShaderProgram(const VideoFormat &fmt)
{
    // isSupported(pixfmt)
    if (!fmt.isValid())
        return false;
    releaseShaderProgram();
    video_format.setPixelFormatFFmpeg(fmt.pixelFormatFFmpeg());
    // TODO: only two kinds: packed.glsl, planar.glsl
    QString frag;
    if (fmt.isPlanar()) {
        frag = getShaderFromFile("shaders/yuv_rgb.f.glsl");
    } else {
        frag = getShaderFromFile("shaders/rgb.f.glsl");
    }
    if (frag.isEmpty())
        return false;
    if (!fmt.isRGB() && fmt.isPlanar() && fmt.bytesPerPixel(0) == 2) {
        if (fmt.isBigEndian())
            frag.prepend("#define YUV16BITS_BE_LUMINANCE_ALPHA\n");
        else
            frag.prepend("#define YUV16BITS_LE_LUMINANCE_ALPHA\n");
        frag.prepend(QString("#define YUV%1P\n").arg(fmt.bitsPerPixel(0)));
    }
#if NO_QGL_SHADER
    program = createProgram(kVertexShader, frag.toUtf8().constData());
    if (!program) {
        qWarning("Could not create shader program.");
        return false;
    }
    // vertex shader
    a_Position = glGetAttribLocation(program, "a_Position");
    a_TexCoords = glGetAttribLocation(program, "a_TexCoords");
    u_matrix = glGetUniformLocation(program, "u_MVP_matrix");
    // fragment shader
    u_colorMatrix = glGetUniformLocation(program, "u_colorMatrix");
#else
    if (!shader_program->addShaderFromSourceCode(QGLShader::Vertex, kVertexShader)) {
        qWarning("Failed to add vertex shader: %s", shader_program->log().toUtf8().constData());
        return false;
    }
    if (!shader_program->addShaderFromSourceCode(QGLShader::Fragment, frag)) {
        qWarning("Failed to add fragment shader: %s", shader_program->log().toUtf8().constData());
        return false;
    }
    if (!shader_program->link()) {
        qWarning("Failed to link shader program...%s", shader_program->log().toUtf8().constData());
        return false;
    }
    // vertex shader
    a_Position = shader_program->attributeLocation("a_Position");
    a_TexCoords = shader_program->attributeLocation("a_TexCoords");
    u_matrix = shader_program->uniformLocation("u_MVP_matrix");
    // fragment shader
    u_colorMatrix = shader_program->uniformLocation("u_colorMatrix");
#endif //NO_QGL_SHADER
    qDebug("glGetAttribLocation(\"a_Position\") = %d\n", a_Position);
    qDebug("glGetAttribLocation(\"a_TexCoords\") = %d\n", a_TexCoords);
    qDebug("glGetUniformLocation(\"u_MVP_matrix\") = %d\n", u_matrix);
    qDebug("glGetUniformLocation(\"u_colorMatrix\") = %d\n", u_colorMatrix);

    if (fmt.isRGB())
        u_Texture.resize(1);
    else
        u_Texture.resize(fmt.channels());
    for (int i = 0; i < u_Texture.size(); ++i) {
        QString tex_var = QString("u_Texture%1").arg(i);
#if NO_QGL_SHADER
        u_Texture[i] = glGetUniformLocation(program, tex_var.toUtf8().constData());
#else
        u_Texture[i] = shader_program->uniformLocation(tex_var);
#endif
        qDebug("glGetUniformLocation(\"%s\") = %d\n", tex_var.toUtf8().constData(), u_Texture[i]);
    }
    return true;
}
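
After the locations are resolved, they are uploaded each frame before drawing. A hedged sketch of that step for the QGLShaderProgram (!NO_QGL_SHADER) path; the function name and signature are invented here, and texture unit i is assumed to already hold the texture for channel i.

#include <QGLShaderProgram>
#include <QMatrix4x4>
#include <QVector>

// Illustration only: per-frame upload of the uniforms resolved in
// prepareShaderProgram(). Geometry attributes and texture binding are
// assumed to be handled elsewhere in the renderer.
static void uploadUniformsSketch(QGLShaderProgram* program,
                                 int u_matrix, int u_colorMatrix,
                                 const QVector<int>& u_Texture,
                                 const QMatrix4x4& mvp, const QMatrix4x4& colorMatrix)
{
    program->bind();
    program->setUniformValue(u_matrix, mvp);               // u_MVP_matrix
    program->setUniformValue(u_colorMatrix, colorMatrix);  // u_colorMatrix
    for (int i = 0; i < u_Texture.size(); ++i)
        program->setUniformValue(u_Texture[i], i);         // sampler -> texture unit i
}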