Example #1
bool VideoEncoderFFmpeg::encode(const VideoFrame &frame)
{
    DPTR_D(VideoEncoderFFmpeg);
    AVFrame *f = NULL;
    if (frame.isValid()) {
        f = av_frame_alloc();
        f->format = frame.format().pixelFormatFFmpeg();
        f->width = frame.width();
        f->height = frame.height();
//        f->quality = d.avctx->global_quality;
        switch (timestampMode()) {
        case TimestampCopy:
            f->pts = int64_t(frame.timestamp()*frameRate()); // TODO: check monotonically increasing and fix if not. Or another mode?
            break;
        case TimestampMonotonic:
            f->pts = d.nb_encoded+1;
            break;
        default:
            break;
        }
        // pts is set in muxer
        const int nb_planes = frame.planeCount();
        for (int i = 0; i < nb_planes; ++i) {
            f->linesize[i] = frame.bytesPerLine(i);
            f->data[i] = (uint8_t*)frame.constBits(i);
        }
        if (d.avctx->width <= 0) {
            d.avctx->width = frame.width();
        }
        if (d.avctx->height <= 0) {
            d.avctx->height = frame.height();
        }
    }
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = (uint8_t*)d.buffer.constData();
    pkt.size = d.buffer.size();
    int got_packet = 0;
    int ret = avcodec_encode_video2(d.avctx, &pkt, f, &got_packet);
    av_frame_free(&f);
    if (ret < 0) {
        qWarning("error avcodec_encode_video2: %s" ,av_err2str(ret));
        return false; //false
    }
    d.nb_encoded++;
    if (!got_packet) {
        qWarning("no packet got");
        d.packet = Packet();
        // invalid frame means eof
        return frame.isValid();
    }
   // qDebug("enc avpkt.pts: %lld, dts: %lld.", pkt.pts, pkt.dts);
    d.packet = Packet::fromAVPacket(&pkt, av_q2d(d.avctx->time_base));
   // qDebug("enc packet.pts: %.3f, dts: %.3f.", d.packet.pts, d.packet.dts);
    return true;
}
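avcodec_encode_video2() used above has been deprecated since FFmpeg 3.1. Below is a minimal sketch of the replacement send/receive loop, assuming an already-opened AVCodecContext; encodeFrame and the onPacket callback are illustrative names, not QtAV API.

extern "C" {
#include <libavcodec/avcodec.h>
}
#include <functional>

// Sketch only: drive one frame through the FFmpeg >= 3.1 encode API.
// Passing f == NULL starts draining, mirroring the invalid-frame case above.
static bool encodeFrame(AVCodecContext *avctx, AVFrame *f,
                        const std::function<void(AVPacket*)> &onPacket)
{
    int ret = avcodec_send_frame(avctx, f);
    if (ret < 0)
        return false;
    AVPacket *pkt = av_packet_alloc();
    while ((ret = avcodec_receive_packet(avctx, pkt)) == 0) {
        onPacket(pkt); // e.g. hand the packet to the muxer
        av_packet_unref(pkt);
    }
    av_packet_free(&pkt);
    // AVERROR(EAGAIN): encoder wants more input; AVERROR_EOF: fully drained
    return ret == AVERROR(EAGAIN) || ret == AVERROR_EOF;
}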
Example #2
VideoFrame VideoDecoderContext::decodeVideo(OptionalErrorCode ec, const Packet &packet, size_t offset, size_t *decodedBytes, bool autoAllocateFrame)
{
    clear_if(ec);

    VideoFrame outFrame;
    if (!autoAllocateFrame)
    {
        outFrame = {pixelFormat(), width(), height(), 32};

        if (!outFrame.isValid())
        {
            throws_if(ec, Errors::FrameInvalid);
            return VideoFrame();
        }
    }

    int gotFrame = 0;
    auto st = decodeCommon(outFrame, packet, offset, gotFrame, avcodec_decode_video_legacy);

    if (get<1>(st)) {
        throws_if(ec, get<0>(st), *get<1>(st));
        return VideoFrame();
    }

    if (!gotFrame)
        return VideoFrame();

    outFrame.setPictureType(AV_PICTURE_TYPE_I);

    if (decodedBytes)
        *decodedBytes = get<0>(st);

    return outFrame;
}
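A hypothetical driver for the decodeVideo() above, following avcpp conventions; the FormatContext helpers (openInput, findStreamInfo, streamsCount, readPacket) and the file name are assumptions for illustration, not taken from the snippet.

#include <system_error>

std::error_code ec;
av::FormatContext ictx;
ictx.openInput("input.mkv", ec);
ictx.findStreamInfo(ec);
// locate the first video stream
ssize_t videoIdx = -1;
av::Stream vst;
for (size_t i = 0; i < ictx.streamsCount(); ++i) {
    av::Stream st = ictx.stream(i);
    if (st.mediaType() == AVMEDIA_TYPE_VIDEO) {
        videoIdx = ssize_t(i);
        vst = st;
        break;
    }
}
av::VideoDecoderContext vdec(vst);
vdec.open(ec);
while (av::Packet pkt = ictx.readPacket(ec)) {
    if (pkt.streamIndex() != videoIdx)
        continue;
    // offset 0, no decoded-bytes out-param, let the context allocate the frame
    av::VideoFrame frame = vdec.decodeVideo(ec, pkt, 0, nullptr, true);
    if (frame.isValid()) {
        // consume the frame here
    }
}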
Example #3
VideoFrame VideoFrameConverter::convert(const VideoFrame &frame, int fffmt) const
{
    if (!frame.isValid() || fffmt == QTAV_PIX_FMT_C(NONE))
        return VideoFrame();
    if (!frame.constBits(0)) // hw surface
        return frame.to(VideoFormat::pixelFormatFromFFmpeg(fffmt));
    const VideoFormat format(frame.format());
    //if (fffmt == format.pixelFormatFFmpeg())
      //  return *this;
    if (!m_cvt) {
        m_cvt = new ImageConverterSWS();
    }
    m_cvt->setBrightness(m_eq[0]);
    m_cvt->setContrast(m_eq[1]);
    m_cvt->setSaturation(m_eq[2]);
    m_cvt->setInFormat(format.pixelFormatFFmpeg());
    m_cvt->setOutFormat(fffmt);
    m_cvt->setInSize(frame.width(), frame.height());
    m_cvt->setOutSize(frame.width(), frame.height());
    m_cvt->setInRange(frame.colorRange());
    const int pal = format.hasPalette();
    QVector<const uchar*> pitch(format.planeCount() + pal);
    QVector<int> stride(format.planeCount() + pal);
    for (int i = 0; i < format.planeCount(); ++i) {
        pitch[i] = frame.constBits(i);
        stride[i] = frame.bytesPerLine(i);
    }
    const QByteArray paldata(frame.metaData(QStringLiteral("pallete")).toByteArray()); // key spelled "pallete" to match the producer side
    if (pal > 0) {
        pitch[1] = (const uchar*)paldata.constData();
        stride[1] = paldata.size();
    }
    if (!m_cvt->convert(pitch.constData(), stride.constData())) {
        return VideoFrame();
    }
    const VideoFormat fmt(fffmt);
    VideoFrame f(frame.width(), frame.height(), fmt, m_cvt->outData());
    f.setBits(m_cvt->outPlanes());
    f.setBytesPerLine(m_cvt->outLineSizes());
    f.setTimestamp(frame.timestamp());
    f.setDisplayAspectRatio(frame.displayAspectRatio());
    // metadata?
    if (fmt.isRGB()) {
        f.setColorSpace(fmt.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
    } else {
        f.setColorSpace(ColorSpace_Unknown);
    }
    // TODO: color range
    return f;
}
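ImageConverterSWS above wraps libswscale. For reference, a minimal sketch of the underlying conversion with the raw API; the function name and the fixed SWS_BILINEAR flag are choices for illustration.

extern "C" {
#include <libswscale/swscale.h>
}

// Sketch: convert one frame from srcFmt to dstFmt at identical dimensions.
// dstData/dstStride must describe an already-allocated destination image
// (e.g. from av_image_alloc()).
static bool convertFrame(const uint8_t *const srcData[4], const int srcStride[4],
                         uint8_t *dstData[4], int dstStride[4],
                         int w, int h, AVPixelFormat srcFmt, AVPixelFormat dstFmt)
{
    SwsContext *ctx = sws_getContext(w, h, srcFmt, w, h, dstFmt,
                                     SWS_BILINEAR, NULL, NULL, NULL);
    if (!ctx)
        return false;
    sws_scale(ctx, srcData, srcStride, 0, h, dstData, dstStride); // rows [0, h)
    sws_freeContext(ctx);
    return true;
}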
Example #4
bool VideoEncoderFFmpeg::encode(const VideoFrame &frame)
{
    DPTR_D(VideoEncoderFFmpeg);
    AVFrame *f = NULL;
    if (frame.isValid()) {
        f = av_frame_alloc();
        f->format = frame.format().pixelFormatFFmpeg();
        f->width = frame.width();
        f->height = frame.height();
        // TODO: record last pts
        f->pts = int64_t(frame.timestamp()*frameRate());
        // pts is set in muxer
        const int nb_planes = frame.planeCount();
        for (int i = 0; i < nb_planes; ++i) {
            f->linesize[i] = frame.bytesPerLine(i);
            f->data[i] = (uint8_t*)frame.bits(i);
        }
        if (d.avctx->width <= 0) {
            d.avctx->width = frame.width();
        }
        if (d.avctx->height <= 0) {
            d.avctx->height = frame.height();
        }
    }
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = (uint8_t*)d.buffer.constData();
    pkt.size = d.buffer.size();
    int got_packet = 0;
    int ret = avcodec_encode_video2(d.avctx, &pkt, f, &got_packet);
    av_frame_free(&f);
    if (ret < 0) {
        //qWarning("error avcodec_encode_video2: %s" ,av_err2str(ret));
        return false;
    }
    if (!got_packet) {
        qWarning("no packet got");
        return false;
    }
    qDebug("enc avpkt.pts: %lld, dts: %lld.", pkt.pts, pkt.dts);
    d.packet = Packet::fromAVPacket(&pkt, av_q2d(d.avctx->time_base));
    qDebug("enc packet.pts: %.3f, dts: %.3f.", d.packet.pts, d.packet.dts);
    return true;
}
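In both encode() variants f->pts counts in frame-rate units (one tick per frame), while a muxer works in the stream time base. A sketch of the usual rescale; fps and stream_tb are assumed inputs.

extern "C" {
#include <libavutil/mathematics.h>
}

// Sketch: convert a frame-counting pts (units of 1/fps, as written above)
// into a stream's time base before handing the packet to the muxer.
static int64_t toStreamTimeBase(int64_t framePts, int fps, AVRational stream_tb)
{
    const AVRational frame_tb = {1, fps}; // one tick per frame
    return av_rescale_q(framePts, frame_tb, stream_tb);
}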
Example #5
VideoFrame VideoFrameConverter::convert(const VideoFrame &frame, int fffmt) const
{
    if (!frame.isValid() || fffmt == QTAV_PIX_FMT_C(NONE))
        return VideoFrame();
    if (!frame.bits(0)) // hw surface
        return frame.to(VideoFormat::pixelFormatFromFFmpeg(fffmt));
    const VideoFormat format(frame.format());
    //if (fffmt == format.pixelFormatFFmpeg())
    //  return *this;
    if (!m_cvt) {
        m_cvt = new ImageConverterSWS();
    }
    m_cvt->setBrightness(m_eq[0]);
    m_cvt->setContrast(m_eq[1]);
    m_cvt->setSaturation(m_eq[2]);
    m_cvt->setInFormat(format.pixelFormatFFmpeg());
    m_cvt->setOutFormat(fffmt);
    m_cvt->setInSize(frame.width(), frame.height());
    m_cvt->setOutSize(frame.width(), frame.height());
    QVector<const uchar*> pitch(format.planeCount());
    QVector<int> stride(format.planeCount());
    for (int i = 0; i < format.planeCount(); ++i) {
        pitch[i] = frame.bits(i);
        stride[i] = frame.bytesPerLine(i);
    }
    if (!m_cvt->convert(pitch.constData(), stride.constData())) {
        return VideoFrame();
    }
    const VideoFormat fmt(fffmt);
    VideoFrame f(m_cvt->outData(), frame.width(), frame.height(), fmt);
    f.setBits(m_cvt->outPlanes());
    f.setBytesPerLine(m_cvt->outLineSizes());
    f.setTimestamp(frame.timestamp());
    // metadata?
    if (fmt.isRGB()) {
        f.setColorSpace(fmt.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
    } else {
        f.setColorSpace(ColorSpace_Unknown);
    }
    return f;
}
Example #6
void ImageServer::emitTestImage()
{
    if(m_v4l2)
    {
        VideoFrame f = m_v4l2->readFrame();
        if(f.isValid())
            m_frame = f;

        //qglClearColor(Qt::black);
        //glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

        QImage image((const uchar*)m_frame.byteArray.constData(), m_frame.size.width(), m_frame.size.height(), QImage::Format_RGB32);
        //emit testImage(image.scaled(160,120), m_frame.captureTime);
        emit testImage(image, QTime::currentTime()); // was: m_frame.captureTime
    }
    else
    {
        emit testImage(m_testImage, QTime::currentTime());
    }

}
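One caveat in Example #6: the QImage constructor taking a const uchar* wraps the frame bytes without copying, so the buffer must outlive every use of the image. If testImage can be delivered over a queued connection, a deep copy is the safe variant; a minimal sketch:

// Sketch: QImage(const uchar*, ...) is a shallow view over m_frame's bytes;
// .copy() forces a deep copy that owns its data, so a queued receiver cannot
// observe freed or overwritten memory once m_frame changes.
QImage shallow((const uchar*)m_frame.byteArray.constData(),
               m_frame.size.width(), m_frame.size.height(),
               QImage::Format_RGB32);
emit testImage(shallow.copy(), QTime::currentTime());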
Example #7
void VideoSurface::paintGL()
{
    mutex.lock();
    VideoFrame currFrame = frame;
    frame.invalidate();
    mutex.unlock();

    if (currFrame.isValid() && res != currFrame.resolution)
    {
        res = currFrame.resolution;

        // delete old texture
        if (textureId != 0)
            glDeleteTextures(1, &textureId);

        // a texture used to render the pbo (has to match the pixel format of the source)
        glGenTextures(1,&textureId);
        glBindTexture(GL_TEXTURE_2D, textureId);
        glTexImage2D(GL_TEXTURE_2D,0, GL_RGB, res.width(), res.height(), 0, GL_RGB, GL_UNSIGNED_BYTE, 0);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    }

    if (currFrame.isValid())
    {
        pboIndex = (pboIndex + 1) % 2;
        int nextPboIndex = (pboIndex + 1) % 2;

        if (pboAllocSize != currFrame.frameData.size())
        {
            qDebug() << "VideoSurface: Resize pbo " << currFrame.frameData.size() << "(" << currFrame.resolution << ")" << "bytes (before" << pboAllocSize << ")";

            pbo[0]->bind();
            pbo[0]->allocate(currFrame.frameData.size());
            pbo[0]->release();

            pbo[1]->bind();
            pbo[1]->allocate(currFrame.frameData.size());
            pbo[1]->release();

            pboAllocSize = currFrame.frameData.size();
        }

        pbo[pboIndex]->bind();
        glBindTexture(GL_TEXTURE_2D, textureId);
        glTexSubImage2D(GL_TEXTURE_2D,0,0,0, res.width(), res.height(), GL_RGB, GL_UNSIGNED_BYTE, 0);
        pbo[pboIndex]->unmap();
        pbo[pboIndex]->release();

        // transfer data
        pbo[nextPboIndex]->bind();
        void* ptr = pbo[nextPboIndex]->map(QOpenGLBuffer::WriteOnly);
        if (ptr)
            memcpy(ptr, currFrame.frameData.data(), currFrame.frameData.size());
        pbo[nextPboIndex]->unmap();
        pbo[nextPboIndex]->release();
    }

    // background
    glClearColor(0, 0, 0, 1);
    glClear(GL_COLOR_BUFFER_BIT);

    // keep aspect ratio
    float aspectRatio = float(res.width()) / float(res.height());
    if (width() < float(height()) * aspectRatio)
    {
        float h = float(width()) / aspectRatio;
        glViewport(0, (height() - h)*0.5f, width(), h);
    }
    else
    {
        float w = float(height()) * float(aspectRatio);
        glViewport((width() - w)*0.5f, 0, w, height());
    }

    QOpenGLShaderProgram* programm = nullptr;
    switch (currFrame.format) // use the copy; frame was invalidated above and is not mutex-protected here
    {
    case VideoFrame::YUV:
        programm = yuvProgramm;
        break;
    case VideoFrame::BGR:
        programm = bgrProgramm;
        break;
    default:
        break;
    }

    if (programm)
    {
        // render pbo
        static float values[] = {
            -1, -1,
            1, -1,
            -1, 1,
            1, 1
        };

        programm->bind();
        programm->setAttributeArray(0, GL_FLOAT, values, 2);
        programm->enableAttributeArray(0);
    }

    glBindTexture(GL_TEXTURE_2D, textureId);

    //draw fullscreen quad
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);

    glBindTexture(GL_TEXTURE_2D, 0);

    if (programm)
    {
        programm->disableAttributeArray(0);
        programm->release();
    }
}
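The glTexSubImage2D call in paintGL() runs with a pixel-unpack PBO bound, so its last argument is a byte offset into the buffer rather than a client pointer. The same idiom in plain GL; pboId, texId, w and h are assumed variables.

// Sketch: with GL_PIXEL_UNPACK_BUFFER bound, the data argument of
// glTexSubImage2D is an offset into the PBO (0 = buffer start), and the
// upload proceeds without a client-side copy.
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboId);
glBindTexture(GL_TEXTURE_2D, texId);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h,
                GL_RGB, GL_UNSIGNED_BYTE, (const void*)0);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); // restore client-memory uploads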
Example #8
 // return the key frame position
 bool extractInPrecision(qint64 value, int range) {
     frame = VideoFrame();
     if (value < demuxer.startTime())
         value += demuxer.startTime();
     demuxer.seek(value);
     const int vstream = demuxer.videoStream();
     Packet pkt;
     qint64 pts0 = -1;
     bool warn_bad_seek = true;
     bool warn_out_of_range = true;
     while (!demuxer.atEnd()) {
         if (!demuxer.readFrame())
             continue;
         if (demuxer.stream() != vstream)
             continue;
         pkt = demuxer.packet();
         if (pts0 < 0LL)
             pts0 = (qint64)(pkt.pts*1000.0);
         if ((qint64)(pkt.pts*1000.0) - value > (qint64)range) {
             if (warn_out_of_range)
                 qDebug("read packet out of range");
             warn_out_of_range = false;
             // No return because decoder needs more packets before the desired frame is decoded
             //return false;
         }
         //qDebug("video packet: %f", pkt.pts);
         // TODO: always key frame?
         if (pkt.hasKeyFrame)
             break;
         if (warn_bad_seek)
             qWarning("Not seek to key frame!!!");
         warn_bad_seek = false;
     }
     // enlarge range if seek to key-frame failed
     const qint64 key_pts = (qint64)(pkt.pts*1000.0);
     const bool enlarge_range = pts0 >= 0LL && key_pts - pts0 > 0LL;
     if (enlarge_range) {
         range = qMax<qint64>(key_pts - value, range);
         qDebug() << "enlarge range ==>>>> " << range;
     }
     if (!pkt.isValid()) {
         qWarning("VideoFrameExtractor failed to get a packet at %lld", value);
         return false;
     }
     decoder->flush(); //must flush otherwise old frames will be decoded at the beginning
     decoder->setOptions(dec_opt_normal);
     // must decode key frame
     int k = 0;
     while (k < 2 && !frame.isValid()) {
         //qWarning("invalid key frame!!!!! undecoded: %d", decoder->undecodedSize());
         if (decoder->decode(pkt)) {
             frame = decoder->frame();
         }
         ++k;
     }
     // if we seek backward to a key frame correctly, diff0 = t - value <= 0;
     // sometimes we land on a non-key frame instead (range was enlarged above), so diff0 >= 0
     const qint64 diff0 = qint64(frame.timestamp()*1000.0) - value;
     if (qAbs(diff0) <= range) { //TODO: flag forward: result pts must >= value
         if (frame.isValid()) {
             qDebug() << "VideoFrameExtractor: key frame found @" << frame.timestamp() <<" diff=" << diff0 << ". format: " <<  frame.format();
             return true;
         }
     }
     QVariantHash* dec_opt = &dec_opt_normal; // normal by default; may switch to framedrop below
     // decode at the given position
     while (!demuxer.atEnd()) {
         if (!demuxer.readFrame())
             continue;
         if (demuxer.stream() != vstream)
             continue;
         pkt = demuxer.packet();
         const qreal t = pkt.pts;
         //qDebug("video packet: %f, delta=%lld", t, value - qint64(t*1000.0));
         if (!pkt.isValid()) {
             qWarning("invalid packet. no decode");
             continue;
         }
         if (pkt.hasKeyFrame) {
             // FIXME:
             //qCritical("Internal error. Can not be a key frame!!!!");
             //return false; //??
         }
         qint64 diff = qint64(t*1000.0) - value;
         QVariantHash *dec_opt_old = dec_opt;
         if (seek_count == 0 || diff >= 0)
             dec_opt = &dec_opt_normal;
         else
             dec_opt = &dec_opt_framedrop;
         if (dec_opt != dec_opt_old)
             decoder->setOptions(*dec_opt);
         // invalid packet?
         if (!decoder->decode(pkt)) {
             qWarning("!!!!!!!!!decode failed!!!!");
             frame = VideoFrame();
             return false;
         }
         // store the last decoded frame because next frame may be out of range
         const VideoFrame f = decoder->frame();
         if (!f.isValid()) {
             //qDebug("VideoFrameExtractor: invalid frame!!!");
             continue;
         }
         frame = f;
         const qreal pts = frame.timestamp();
         const qint64 pts_ms = pts*1000.0;
         if (pts_ms < value)
             continue;
         diff = pts_ms - value;
         if (qAbs(diff) <= (qint64)range) {
             qDebug("got frame at %fs, diff=%lld", pts, diff);
             break;
         }
         // if decoder was not flushed, we may get old frame which is acceptable
         if (diff > range && t > pts) {
             qWarning("out pts out of range. diff=%lld, range=%d", diff, range);
             frame = VideoFrame();
             return false;
         }
     }
     ++seek_count;
     // now we get the final frame
     return true;
 }
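extractInPrecision() implements accurate seeking: jump to the key frame at or before the target, then decode forward until the target pts is reached. The same pattern on the raw FFmpeg API looks roughly like this; the function name and parameters are assumptions for illustration.

extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
}

// Sketch: seek to the key frame at/before target_ts (in stream time base).
// The caller then decodes forward, discarding frames until
// frame->pts >= target_ts, exactly as the loop above does.
static int seekToKeyFrameBefore(AVFormatContext *fmt, AVCodecContext *dec,
                                int videoStream, int64_t target_ts)
{
    // AVSEEK_FLAG_BACKWARD lands on the nearest preceding key frame
    int ret = av_seek_frame(fmt, videoStream, target_ts, AVSEEK_FLAG_BACKWARD);
    if (ret >= 0)
        avcodec_flush_buffers(dec); // drop frames queued before the seek
    return ret;
}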
Example #9
bool VideoEncoderFFmpeg::encode(const VideoFrame &frame)
{
    DPTR_D(VideoEncoderFFmpeg);
    QScopedPointer<AVFrame, ScopedAVFrameDeleter> f;
    // hwupload
    AVPixelFormat pixfmt = AVPixelFormat(frame.pixelFormatFFmpeg());
    if (frame.isValid()) {
        f.reset(av_frame_alloc());
        f->format = pixfmt;
        f->width = frame.width();
        f->height = frame.height();
//        f->quality = d.avctx->global_quality;
        switch (timestampMode()) {
        case TimestampCopy:
            f->pts = int64_t(frame.timestamp()*frameRate()); // TODO: check monotonically increasing and fix if not. Or another mode?
            break;
        case TimestampMonotonic:
            f->pts = d.nb_encoded+1;
            break;
        default:
            break;
        }

        // pts is set in muxer
        const int nb_planes = frame.planeCount();
        for (int i = 0; i < nb_planes; ++i) {
            f->linesize[i] = frame.bytesPerLine(i);
            f->data[i] = (uint8_t*)frame.constBits(i);
        }
        if (d.avctx->width <= 0) {
            d.avctx->width = frame.width();
        }
        if (d.avctx->height <= 0) {
            d.avctx->height = frame.height();
        }
#ifdef HAVE_AVHWCTX
        if (d.avctx->hw_frames_ctx) {
            // TODO: try to map to SourceSurface
            // check valid sw_formats
            if (!d.hwframes_ref) {
                qWarning("no hw frame context for uploading");
                return false;
            }
            if (pixfmt != d.hwframes->sw_format) {
                // reinit or got an unsupported format. assume parameters will not change, so it's the 1st init
                // check constraints
                bool init_frames_ctx = d.hwframes->sw_format == AVPixelFormat(-1);
                if (d.sw_fmts.contains(pixfmt)) { // format changed
                    init_frames_ctx = true;
                } else { // convert to supported sw format
                    pixfmt = d.sw_fmts[0];
                    f->format = pixfmt;
                    VideoFrame converted = frame.to(VideoFormat::pixelFormatFromFFmpeg(pixfmt));
                    for (int i = 0; i < converted.planeCount(); ++i) {
                        f->linesize[i] = converted.bytesPerLine(i);
                        f->data[i] = (uint8_t*)converted.constBits(i);
                    }
                }
                if (init_frames_ctx) {
                    d.hwframes->sw_format = pixfmt;
                    d.hwframes->width = frame.width();
                    d.hwframes->height = frame.height();
                    AV_ENSURE(av_hwframe_ctx_init(d.hwframes_ref), false);
                }
            }
            // upload
            QScopedPointer<AVFrame, ScopedAVFrameDeleter> hwf( av_frame_alloc());
            AV_ENSURE(av_hwframe_get_buffer(d.hwframes_ref, hwf.data(), 0), false);
            //hwf->format = d.hwframes->format; // not necessary
            //hwf->width = f->width;
            //hwf->height = f->height;
            AV_ENSURE(av_hwframe_transfer_data(hwf.data(), f.data(), 0), false);
            AV_ENSURE(av_frame_copy_props(hwf.data(), f.data()), false);
            av_frame_unref(f.data());
            av_frame_move_ref(f.data(), hwf.data());
        }
#endif //HAVE_AVHWCTX
    }
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = (uint8_t*)d.buffer.constData();
    pkt.size = d.buffer.size();
    int got_packet = 0;
    int ret = avcodec_encode_video2(d.avctx, &pkt, f.data(), &got_packet);
    if (ret < 0) {
        qWarning("error avcodec_encode_video2: %s" ,av_err2str(ret));
        return false; //false
    }
    d.nb_encoded++;
    if (!got_packet) {
        qWarning("no packet got");
        d.packet = Packet();
        // invalid frame means eof
        return frame.isValid();
    }
   // qDebug("enc avpkt.pts: %lld, dts: %lld.", pkt.pts, pkt.dts);
    d.packet = Packet::fromAVPacket(&pkt, av_q2d(d.avctx->time_base));
   // qDebug("enc packet.pts: %.3f, dts: %.3f.", d.packet.pts, d.packet.dts);
    return true;
}
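For context on the HAVE_AVHWCTX path in Example #9: the AVHWFramesContext consumed by av_hwframe_get_buffer() and av_hwframe_transfer_data() is created from a device context roughly as below; device_ref, the formats and the sizes are assumed inputs.

extern "C" {
#include <libavutil/hwcontext.h>
}

// Sketch: build the hw frames context backing the upload path above.
// device_ref is an already-created AVBufferRef from av_hwdevice_ctx_create().
static AVBufferRef *createHwFrames(AVBufferRef *device_ref,
                                   AVPixelFormat hw_fmt, AVPixelFormat sw_fmt,
                                   int w, int h)
{
    AVBufferRef *frames_ref = av_hwframe_ctx_alloc(device_ref);
    if (!frames_ref)
        return NULL;
    AVHWFramesContext *ctx = (AVHWFramesContext*)frames_ref->data;
    ctx->format    = hw_fmt;   // e.g. AV_PIX_FMT_VAAPI
    ctx->sw_format = sw_fmt;   // e.g. AV_PIX_FMT_NV12
    ctx->width     = w;
    ctx->height    = h;
    if (av_hwframe_ctx_init(frames_ref) < 0) {
        av_buffer_unref(&frames_ref);
        return NULL;
    }
    return frames_ref;
}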