Example #1
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    SideDataContext *s = ctx->priv;
    AVFrameSideData *sd = NULL;

    if (s->type != -1)
        sd = av_frame_get_side_data(frame, s->type);

    switch (s->mode) {
    case SIDEDATA_SELECT:
        if (sd) {
            return ff_filter_frame(outlink, frame);
        }
        break;
    case SIDEDATA_DELETE:
        if (s->type == -1) {
            while (frame->nb_side_data)
                av_frame_remove_side_data(frame, frame->side_data[0]->type);
        } else if (sd) {
            av_frame_remove_side_data(frame, s->type);
        }
        return ff_filter_frame(outlink, frame);
    default:
        av_assert0(0);
    }

    av_frame_free(&frame);

    return 0;
}
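
For reference, the side-data calls this filter is built on can be exercised standalone. The sketch below is not part of the filter; it attaches a REPLAYGAIN payload to a frame, looks it up again, and removes it. The payload type and zero-fill are purely illustrative.

#include <string.h>

#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/replaygain.h>

/* Minimal sketch: attach, look up, and remove side data on a frame,
 * mirroring the av_frame_*_side_data() calls used by the filter above. */
static int side_data_demo(void)
{
    AVFrame *frame = av_frame_alloc();
    AVFrameSideData *sd;

    if (!frame)
        return AVERROR(ENOMEM);

    /* Attach an AVReplayGain payload (zero-filled here for illustration). */
    sd = av_frame_new_side_data(frame, AV_FRAME_DATA_REPLAYGAIN,
                                sizeof(AVReplayGain));
    if (!sd) {
        av_frame_free(&frame);
        return AVERROR(ENOMEM);
    }
    memset(sd->data, 0, sd->size);

    /* Look it up again, as filter_frame() does for s->type ... */
    sd = av_frame_get_side_data(frame, AV_FRAME_DATA_REPLAYGAIN);

    /* ... and remove it, as the SIDEDATA_DELETE mode does. */
    if (sd)
        av_frame_remove_side_data(frame, AV_FRAME_DATA_REPLAYGAIN);

    av_frame_free(&frame);
    return 0;
}
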
Example #2
File: af_volume.c  Project: AVLeo/libav
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    VolumeContext *vol    = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    int nb_samples        = buf->nb_samples;
    AVFrame *out_buf;
    AVFrameSideData *sd = av_frame_get_side_data(buf, AV_FRAME_DATA_REPLAYGAIN);
    int ret;

    if (sd && vol->replaygain != REPLAYGAIN_IGNORE) {
        if (vol->replaygain != REPLAYGAIN_DROP) {
            AVReplayGain *replaygain = (AVReplayGain*)sd->data;
            int32_t gain  = 100000;
            uint32_t peak = 100000;
            float g, p;

            if (vol->replaygain == REPLAYGAIN_TRACK &&
                replaygain->track_gain != INT32_MIN) {
                gain = replaygain->track_gain;

                if (replaygain->track_peak != 0)
                    peak = replaygain->track_peak;
            } else if (replaygain->album_gain != INT32_MIN) {
                gain = replaygain->album_gain;

                if (replaygain->album_peak != 0)
                    peak = replaygain->album_peak;
            } else {
                av_log(inlink->dst, AV_LOG_WARNING, "Both ReplayGain gain "
                       "values are unknown.\n");
            }
            g = gain / 100000.0f;
            p = peak / 100000.0f;

            av_log(inlink->dst, AV_LOG_VERBOSE,
                   "Using gain %f dB from replaygain side data.\n", g);

            vol->volume   = pow(10, (g + vol->replaygain_preamp) / 20);
            if (vol->replaygain_noclip)
                vol->volume = FFMIN(vol->volume, 1.0 / p);
            vol->volume_i = (int)(vol->volume * 256 + 0.5);

            volume_init(vol);
        }
        av_frame_remove_side_data(buf, AV_FRAME_DATA_REPLAYGAIN);
    }

    if (vol->volume == 1.0 || vol->volume_i == 256)
        return ff_filter_frame(outlink, buf);

    /* do volume scaling in-place if input buffer is writable */
    if (av_frame_is_writable(buf)) {
        out_buf = buf;
    } else {
        out_buf = ff_get_audio_buffer(inlink, nb_samples);
        if (!out_buf)
            return AVERROR(ENOMEM);
        ret = av_frame_copy_props(out_buf, buf);
        if (ret < 0) {
            av_frame_free(&out_buf);
            av_frame_free(&buf);
            return ret;
        }
    }

    if (vol->precision != PRECISION_FIXED || vol->volume_i > 0) {
        int p, plane_samples;

        if (av_sample_fmt_is_planar(buf->format))
            plane_samples = FFALIGN(nb_samples, vol->samples_align);
        else
            plane_samples = FFALIGN(nb_samples * vol->channels, vol->samples_align);

        if (vol->precision == PRECISION_FIXED) {
            for (p = 0; p < vol->planes; p++) {
                vol->scale_samples(out_buf->extended_data[p],
                                   buf->extended_data[p], plane_samples,
                                   vol->volume_i);
            }
        } else if (av_get_packed_sample_fmt(vol->sample_fmt) == AV_SAMPLE_FMT_FLT) {
            for (p = 0; p < vol->planes; p++) {
                vol->fdsp.vector_fmul_scalar((float *)out_buf->extended_data[p],
                                             (const float *)buf->extended_data[p],
                                             vol->volume, plane_samples);
            }
        } else {
            for (p = 0; p < vol->planes; p++) {
                vol->fdsp.vector_dmul_scalar((double *)out_buf->extended_data[p],
                                             (const double *)buf->extended_data[p],
                                             vol->volume, plane_samples);
            }
        }
    }

    emms_c();

    if (buf != out_buf)
        av_frame_free(&buf);

    return ff_filter_frame(outlink, out_buf);
}
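
The fixed-point arithmetic above follows the AVReplayGain convention: gain is stored in units of 0.00001 dB and peak in units of 0.00001 of full scale, which is why both are divided by 100000. A standalone sketch of that conversion, where preamp and noclip stand in for vol->replaygain_preamp and vol->replaygain_noclip:

#include <math.h>
#include <stdint.h>

/* Sketch of the gain math above: convert AVReplayGain fixed-point
 * values to a linear volume factor. */
static double replaygain_to_volume(int32_t gain, uint32_t peak,
                                   double preamp, int noclip)
{
    double g = gain / 100000.0;                     /* gain in dB */
    double p = peak / 100000.0;                     /* linear peak */
    double volume = pow(10.0, (g + preamp) / 20.0); /* dB -> linear */

    if (noclip && p > 0.0)
        volume = fmin(volume, 1.0 / p); /* keep the peak below full scale */
    return volume;
}
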
Example #3
/* put sequence header if needed */
static void mpeg1_encode_sequence_header(MpegEncContext *s)
{
    unsigned int vbv_buffer_size, fps, v;
    int i, constraint_parameter_flag;
    uint64_t time_code;
    int64_t best_aspect_error = INT64_MAX;
    AVRational aspect_ratio = s->avctx->sample_aspect_ratio;

    if (aspect_ratio.num == 0 || aspect_ratio.den == 0)
        aspect_ratio = (AVRational){1,1};             // pixel aspect 1:1 (VGA)

    if (s->current_picture.f->key_frame) {
        AVRational framerate = ff_mpeg12_frame_rate_tab[s->frame_rate_index];

        /* mpeg1 header repeated every gop */
        put_header(s, SEQ_START_CODE);

        put_sbits(&s->pb, 12, s->width  & 0xFFF);
        put_sbits(&s->pb, 12, s->height & 0xFFF);

        for (i = 1; i < 15; i++) {
            int64_t error = aspect_ratio.num * (1LL<<32) / aspect_ratio.den;
            if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || i <= 1)
                error -= (1LL<<32) / ff_mpeg1_aspect[i];
            else
                error -= (1LL<<32)*ff_mpeg2_aspect[i].num * s->height / s->width / ff_mpeg2_aspect[i].den;

            error = FFABS(error);

            if (error - 2 <= best_aspect_error) {
                best_aspect_error    = error;
                s->aspect_ratio_info = i;
            }
        }

        put_bits(&s->pb, 4, s->aspect_ratio_info);
        put_bits(&s->pb, 4, s->frame_rate_index);

        if (s->avctx->rc_max_rate) {
            v = (s->avctx->rc_max_rate + 399) / 400;
            if (v > 0x3ffff && s->codec_id == AV_CODEC_ID_MPEG1VIDEO)
                v = 0x3ffff;
        } else {
            v = 0x3FFFF;
        }

        if (s->avctx->rc_buffer_size)
            vbv_buffer_size = s->avctx->rc_buffer_size;
        else
            /* VBV calculation: Scaled so that a VCD has the proper
             * VBV size of 40 kilobytes */
            vbv_buffer_size = ((20 * s->bit_rate) / (1151929 / 2)) * 8 * 1024;
        vbv_buffer_size = (vbv_buffer_size + 16383) / 16384;

        put_sbits(&s->pb, 18, v);
        put_bits(&s->pb, 1, 1);         // marker
        put_sbits(&s->pb, 10, vbv_buffer_size);

        constraint_parameter_flag =
            s->width  <= 768                                    &&
            s->height <= 576                                    &&
            s->mb_width * s->mb_height                 <= 396   &&
            s->mb_width * s->mb_height * framerate.num <= 396 * 25 * framerate.den &&
            framerate.num <= framerate.den * 30                 &&
            s->avctx->me_range                                  &&
            s->avctx->me_range < 128                            &&
            vbv_buffer_size <= 20                               &&
            v <= 1856000 / 400                                  &&
            s->codec_id == AV_CODEC_ID_MPEG1VIDEO;

        put_bits(&s->pb, 1, constraint_parameter_flag);

        ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix);
        ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix);

        if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            AVFrameSideData *side_data;
            int width = s->width;
            int height = s->height;
            int use_seq_disp_ext;

            put_header(s, EXT_START_CODE);
            put_bits(&s->pb, 4, 1);                 // seq ext

            put_bits(&s->pb, 1, s->avctx->profile == 0); // escx 1 for 4:2:2 profile

            put_bits(&s->pb, 3, s->avctx->profile); // profile
            put_bits(&s->pb, 4, s->avctx->level);   // level

            put_bits(&s->pb, 1, s->progressive_sequence);
            put_bits(&s->pb, 2, s->chroma_format);
            put_bits(&s->pb, 2, s->width  >> 12);
            put_bits(&s->pb, 2, s->height >> 12);
            put_bits(&s->pb, 12, v >> 18);          // bitrate ext
            put_bits(&s->pb, 1, 1);                 // marker
            put_bits(&s->pb, 8, vbv_buffer_size >> 10); // vbv buffer ext
            put_bits(&s->pb, 1, s->low_delay);
            put_bits(&s->pb, 2, s->mpeg2_frame_rate_ext.num-1); // frame_rate_ext_n
            put_bits(&s->pb, 5, s->mpeg2_frame_rate_ext.den-1); // frame_rate_ext_d

            side_data = av_frame_get_side_data(s->current_picture_ptr->f, AV_FRAME_DATA_PANSCAN);
            if (side_data) {
                AVPanScan *pan_scan = (AVPanScan *)side_data->data;
                if (pan_scan->width && pan_scan->height) {
                    width = pan_scan->width >> 4;
                    height = pan_scan->height >> 4;
                }
            }
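
The aspect-ratio loop above picks the table index whose coded ratio minimizes a 32.32 fixed-point error against the requested sample aspect ratio; the `error - 2` tolerance lets later, equally good entries win. A reduced sketch of that search over a hypothetical candidate table (not the real ff_mpeg1_aspect):

#include <stdint.h>
#include <stdlib.h>

#include <libavutil/rational.h>

/* Reduced sketch of the index search above. 'candidates' is a
 * hypothetical table of display aspect values indexed from 1. */
static int pick_aspect_index(AVRational aspect,
                             const double *candidates, int n)
{
    int64_t best_error = INT64_MAX;
    int best = 1, i;

    for (i = 1; i < n; i++) {
        int64_t error = aspect.num * (1LL << 32) / aspect.den
                      - (int64_t)((1LL << 32) / candidates[i]);
        error = llabs(error);
        /* '<=' with a small tolerance, as above, prefers later entries. */
        if (error - 2 <= best_error) {
            best_error = error;
            best = i;
        }
    }
    return best;
}
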
Example #4
bool CDVDVideoCodecFFmpeg::GetPictureCommon(VideoPicture* pVideoPicture)
{
  if (!m_pFrame)
    return false;

  pVideoPicture->iWidth = m_pFrame->width;
  pVideoPicture->iHeight = m_pFrame->height;

  /* crop up to 10 pixels if the demuxer asked for it */
  if(m_pCodecContext->coded_width  && m_pCodecContext->coded_width  < (int)pVideoPicture->iWidth
                                   && m_pCodecContext->coded_width  > (int)pVideoPicture->iWidth  - 10)
    pVideoPicture->iWidth = m_pCodecContext->coded_width;

  if(m_pCodecContext->coded_height && m_pCodecContext->coded_height < (int)pVideoPicture->iHeight
                                   && m_pCodecContext->coded_height > (int)pVideoPicture->iHeight - 10)
    pVideoPicture->iHeight = m_pCodecContext->coded_height;

  double aspect_ratio;

  /* use variable in the frame */
  AVRational pixel_aspect = m_pFrame->sample_aspect_ratio;

  if (pixel_aspect.num == 0)
    aspect_ratio = 0;
  else
    aspect_ratio = av_q2d(pixel_aspect) * pVideoPicture->iWidth / pVideoPicture->iHeight;

  if (aspect_ratio <= 0.0)
    aspect_ratio = (float)pVideoPicture->iWidth / (float)pVideoPicture->iHeight;

  if (m_DAR != aspect_ratio)
  {
    m_DAR = aspect_ratio;
    m_processInfo.SetVideoDAR(static_cast<float>(m_DAR));
  }

  /* XXX: we suppose the screen has a 1.0 pixel ratio */ // CDVDVideo will compensate for it.
  pVideoPicture->iDisplayHeight = pVideoPicture->iHeight;
  pVideoPicture->iDisplayWidth  = ((int)RINT(pVideoPicture->iHeight * aspect_ratio)) & -3;
  if (pVideoPicture->iDisplayWidth > pVideoPicture->iWidth)
  {
    pVideoPicture->iDisplayWidth  = pVideoPicture->iWidth;
    pVideoPicture->iDisplayHeight = ((int)RINT(pVideoPicture->iWidth / aspect_ratio)) & -3;
  }


  pVideoPicture->pts = DVD_NOPTS_VALUE;

  AVDictionaryEntry * entry = av_dict_get(m_pFrame->metadata, "stereo_mode", NULL, 0);
  if(entry && entry->value)
  {
    pVideoPicture->stereoMode = (const char*)entry->value;
  }
  else
    pVideoPicture->stereoMode.clear();

  pVideoPicture->iRepeatPicture = 0.5 * m_pFrame->repeat_pict;
  pVideoPicture->iFlags = 0;
  pVideoPicture->iFlags |= m_pFrame->interlaced_frame ? DVP_FLAG_INTERLACED : 0;
  pVideoPicture->iFlags |= m_pFrame->top_field_first ? DVP_FLAG_TOP_FIELD_FIRST: 0;

  if (m_codecControlFlags & DVD_CODEC_CTRL_DROP)
  {
    pVideoPicture->iFlags |= DVP_FLAG_DROPPED;
  }

  pVideoPicture->chroma_position = m_pCodecContext->chroma_sample_location;
  pVideoPicture->color_primaries = m_pCodecContext->color_primaries;
  pVideoPicture->color_transfer = m_pCodecContext->color_trc;
  pVideoPicture->color_space = m_pCodecContext->colorspace;
  pVideoPicture->colorBits = 8;
  // determine the number of bits of the encoded video
  if (m_pCodecContext->pix_fmt == AV_PIX_FMT_YUV420P12)
    pVideoPicture->colorBits = 12;
  else if (m_pCodecContext->pix_fmt == AV_PIX_FMT_YUV420P10)
    pVideoPicture->colorBits = 10;
  else if (m_pCodecContext->codec_id == AV_CODEC_ID_HEVC &&
           m_pCodecContext->profile == FF_PROFILE_HEVC_MAIN_10)
    pVideoPicture->colorBits = 10;
  else if (m_pCodecContext->codec_id == AV_CODEC_ID_H264 &&
           (m_pCodecContext->profile == FF_PROFILE_H264_HIGH_10||
            m_pCodecContext->profile == FF_PROFILE_H264_HIGH_10_INTRA))
    pVideoPicture->colorBits = 10;

  if (m_pCodecContext->color_range == AVCOL_RANGE_JPEG ||
    m_pCodecContext->pix_fmt == AV_PIX_FMT_YUVJ420P)
    pVideoPicture->color_range = 1;
  else
    pVideoPicture->color_range = 0;

  pVideoPicture->qp_table = av_frame_get_qp_table(m_pFrame,
                                                  &pVideoPicture->qstride,
                                                  &pVideoPicture->qscale_type);
  pVideoPicture->pict_type = m_pFrame->pict_type;

  // metadata
  pVideoPicture->hasDisplayMetadata = false;
  pVideoPicture->hasLightMetadata = false;
  AVFrameSideData *sd = av_frame_get_side_data(m_pFrame, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
  if (sd)
  {
    pVideoPicture->displayMetadata = *(AVMasteringDisplayMetadata *)sd->data;
    pVideoPicture->hasDisplayMetadata = true;
  }
  sd = av_frame_get_side_data(m_pFrame, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
  if (sd)
  {
    pVideoPicture->lightMetadata = *(AVContentLightMetadata *)sd->data;
    pVideoPicture->hasLightMetadata = true;
  }

  if (pVideoPicture->iRepeatPicture)
    pVideoPicture->dts = DVD_NOPTS_VALUE;
  else
    pVideoPicture->dts = m_dts;

  m_dts = DVD_NOPTS_VALUE;

  int64_t bpts = m_pFrame->best_effort_timestamp;
  if (bpts != AV_NOPTS_VALUE)
  {
    pVideoPicture->pts = (double)bpts * DVD_TIME_BASE / AV_TIME_BASE;
    if (pVideoPicture->pts == m_decoderPts)
    {
      pVideoPicture->iRepeatPicture = -0.5;
      pVideoPicture->pts = DVD_NOPTS_VALUE;
      pVideoPicture->dts = DVD_NOPTS_VALUE;
    }
  }
  else
    pVideoPicture->pts = DVD_NOPTS_VALUE;

  if (pVideoPicture->pts != DVD_NOPTS_VALUE)
    m_decoderPts = pVideoPicture->pts;

  if (m_requestSkipDeint)
  {
    pVideoPicture->iFlags |= DVD_CODEC_CTRL_SKIPDEINT;
    m_skippedDeint++;
  }

  m_requestSkipDeint = false;
  pVideoPicture->iFlags |= m_codecControlFlags;

  return true;
}
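
The two HDR lookups at the end of GetPictureCommon() can be shown standalone: both payloads are plain structs from libavutil/mastering_display_metadata.h carried as frame side data. A minimal sketch:

#include <libavutil/frame.h>
#include <libavutil/mastering_display_metadata.h>

/* Sketch of the HDR metadata lookups above, standalone. */
static void read_hdr_metadata(const AVFrame *frame)
{
    AVFrameSideData *sd;

    sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
    if (sd) {
        const AVMasteringDisplayMetadata *mdm =
            (const AVMasteringDisplayMetadata *)sd->data;
        if (mdm->has_luminance) {
            /* Luminance values are AVRationals in cd/m^2. */
            double max_nits = av_q2d(mdm->max_luminance);
            (void)max_nits;
        }
    }

    sd = av_frame_get_side_data(frame, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
    if (sd) {
        const AVContentLightMetadata *cll =
            (const AVContentLightMetadata *)sd->data;
        (void)cll->MaxCLL; /* maximum content light level, cd/m^2 */
    }
}
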
Example #5
bool StAVImage::load(const StString& theFilePath,
                     ImageType       theImageType,
                     uint8_t*        theDataPtr,
                     int             theDataSize) {

    // reset current data
    StImage::nullify();
    setState();
    close();
    myMetadata.clear();

    switch(theImageType) {
        case ST_TYPE_PNG:
        case ST_TYPE_PNS: {
            myCodec = avcodec_find_decoder_by_name("png");
            break;
        }
        case ST_TYPE_JPEG:
        case ST_TYPE_MPO:
        case ST_TYPE_JPS: {
            myCodec = avcodec_find_decoder_by_name("mjpeg");
            break;
        }
        case ST_TYPE_EXR: {
            myCodec = avcodec_find_decoder_by_name("exr");
            break;
        }
        case ST_TYPE_WEBP:
        case ST_TYPE_WEBPLL: {
            myCodec = avcodec_find_decoder_by_name("webp");
            break;
        }
        default: {
            break;
        }
    }

    if(theImageType == ST_TYPE_NONE
    || (theDataPtr == NULL && !StFileNode::isFileExists(theFilePath))) {
        // open the image file and detect its type; it could be a non-local file!
    #if(LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(53, 2, 0))
        int avErrCode = avformat_open_input(&myFormatCtx, theFilePath.toCString(), myImageFormat, NULL);
    #else
        int avErrCode = av_open_input_file (&myFormatCtx, theFilePath.toCString(), myImageFormat, 0, NULL);
    #endif
        if(avErrCode != 0
        || myFormatCtx->nb_streams < 1
        || myFormatCtx->streams[0]->codec->codec_id == 0) {
            if(myFormatCtx != NULL) {
            #if(LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(53, 17, 0))
                avformat_close_input(&myFormatCtx);
            #else
                av_close_input_file(myFormatCtx);
                myFormatCtx = NULL;
            #endif
            }

        #if(LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(53, 2, 0))
            avErrCode = avformat_open_input(&myFormatCtx, theFilePath.toCString(), NULL, NULL);
        #else
            avErrCode = av_open_input_file(&myFormatCtx, theFilePath.toCString(), NULL, 0, NULL);
        #endif
        }

        if(avErrCode != 0
        || myFormatCtx->nb_streams < 1) {
            setState(StString("AVFormat library, couldn't open image file. Error: ") + stAV::getAVErrorDescription(avErrCode));
            close();
            return false;
        }

        // find the decoder for the video stream
        myCodecCtx = myFormatCtx->streams[0]->codec;
        if(theImageType == ST_TYPE_NONE) {
            myCodec = avcodec_find_decoder(myCodecCtx->codec_id);
        }
    }

    if(myCodec == NULL) {
        setState("AVCodec library, video codec not found");
        close();
        return false;
    } else if(myFormatCtx == NULL) {
        // use given image type to load decoder
    #if(LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(53, 8, 0))
        myCodecCtx = avcodec_alloc_context3(myCodec);
    #else
        myCodecCtx = avcodec_alloc_context();
    #endif
    }

    // stupid check
    if(myCodecCtx == NULL) {
        setState("AVCodec library, codec context is NULL");
        close();
        return false;
    }

    // open VIDEO codec
#if(LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(53, 8, 0))
    if(avcodec_open2(myCodecCtx, myCodec, NULL) < 0) {
#else
    if(avcodec_open(myCodecCtx, myCodec) < 0) {
#endif
        setState("AVCodec library, could not open video codec");
        close();
        return false;
    }

    // read one packet or file
    StRawFile aRawFile(theFilePath);
    StAVPacket anAvPkt;
    if(theDataPtr != NULL && theDataSize != 0) {
        anAvPkt.getAVpkt()->data = theDataPtr;
        anAvPkt.getAVpkt()->size = theDataSize;
    } else {
        if(myFormatCtx != NULL) {
            if(av_read_frame(myFormatCtx, anAvPkt.getAVpkt()) < 0) {
                setState("AVFormat library, could not read first packet");
                close();
                return false;
            }
        } else {
            if(!aRawFile.readFile()) {
                setState("StAVImage, could not read the file");
                close();
                return false;
            }
            anAvPkt.getAVpkt()->data = (uint8_t* )aRawFile.getBuffer();
            anAvPkt.getAVpkt()->size = (int )aRawFile.getSize();
        }
    }
    anAvPkt.setKeyFrame();

    // decode one frame
    int isFrameFinished = 0;
#if(LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(52, 23, 0))
    avcodec_decode_video2(myCodecCtx, myFrame.Frame, &isFrameFinished, anAvPkt.getAVpkt());
#else
    avcodec_decode_video(myCodecCtx, myFrame.Frame, &isFrameFinished,
                         theDataPtr, theDataSize);
#endif

    if(isFrameFinished == 0) {
        // that's not an image!!! try to decode more packets???
        setState("AVCodec library, input file is not an Image!");
        close();
        return false;
    }

    // check frame size
    if(myCodecCtx->width <= 0 || myCodecCtx->height <= 0) {
        setState("AVCodec library, codec returns wrong frame size");
        close();
        return false;
    }

    // read aspect ratio
    if(myCodecCtx->sample_aspect_ratio.num == 0
    || myCodecCtx->sample_aspect_ratio.den == 0) {
        setPixelRatio(1.0f);
    } else {
        const GLfloat aRatio = GLfloat(myCodecCtx->sample_aspect_ratio.num) / GLfloat(myCodecCtx->sample_aspect_ratio.den);
        if(aRatio > 70.0f) {
            ST_DEBUG_LOG("AVCodec library, igning wrong PAR " + myCodecCtx->sample_aspect_ratio.num + ":" + myCodecCtx->sample_aspect_ratio.den);
            setPixelRatio(1.0f);
        } else {
            setPixelRatio(aRatio);
        }
    }

#ifdef ST_AV_NEWSTEREO
    // currently it is unlikely... but maybe in the future?
    AVFrameSideData* aSideData = av_frame_get_side_data(myFrame.Frame, AV_FRAME_DATA_STEREO3D);
    if(aSideData != NULL) {
        AVStereo3D* aStereo = (AVStereo3D* )aSideData->data;
        mySrcFormat = stAV::stereo3dAvToSt(aStereo->type);
        if(aStereo->flags & AV_STEREO3D_FLAG_INVERT) {
            mySrcFormat = st::formatReversed(mySrcFormat);
        }
    } else {
        mySrcFormat = StFormat_AUTO;
    }
#endif

    // it is unlikely that there would be any metadata from the format...
    // but let's try
    if(myFormatCtx != NULL) {
        for(stAV::meta::Tag* aTag = stAV::meta::findTag(myFormatCtx->metadata, "", NULL, stAV::meta::SEARCH_IGNORE_SUFFIX);
            aTag != NULL;
            aTag = stAV::meta::findTag(myFormatCtx->metadata, "", aTag, stAV::meta::SEARCH_IGNORE_SUFFIX)) {
            myMetadata.add(StDictEntry(aTag->key, aTag->value));
        }
        for(stAV::meta::Tag* aTag = stAV::meta::findTag(myFormatCtx->streams[0]->metadata, "", NULL, stAV::meta::SEARCH_IGNORE_SUFFIX);
            aTag != NULL;
            aTag = stAV::meta::findTag(myFormatCtx->streams[0]->metadata, "", aTag, stAV::meta::SEARCH_IGNORE_SUFFIX)) {
            myMetadata.add(StDictEntry(aTag->key, aTag->value));
        }
    }

    // collect metadata from the frame
    stAV::meta::Dict* aFrameMetadata = stAV::meta::getFrameMetadata(myFrame.Frame);
    for(stAV::meta::Tag* aTag = stAV::meta::findTag(aFrameMetadata, "", NULL, stAV::meta::SEARCH_IGNORE_SUFFIX);
        aTag != NULL;
        aTag = stAV::meta::findTag(aFrameMetadata, "", aTag, stAV::meta::SEARCH_IGNORE_SUFFIX)) {
        myMetadata.add(StDictEntry(aTag->key, aTag->value));
    }

    stAV::dimYUV aDimsYUV;
    if(myCodecCtx->pix_fmt == stAV::PIX_FMT::RGB24) {
        setColorModel(StImage::ImgColor_RGB);
        changePlane(0).initWrapper(StImagePlane::ImgRGB, myFrame.getPlane(0),
                                   myCodecCtx->width, myCodecCtx->height,
                                   myFrame.getLineSize(0));
    } else if(myCodecCtx->pix_fmt == stAV::PIX_FMT::BGR24) {
        setColorModel(StImage::ImgColor_RGB);
        changePlane(0).initWrapper(StImagePlane::ImgBGR, myFrame.getPlane(0),
                                   myCodecCtx->width, myCodecCtx->height,
                                   myFrame.getLineSize(0));
    } else if(myCodecCtx->pix_fmt == stAV::PIX_FMT::RGBA32) {
        setColorModel(StImage::ImgColor_RGBA);
        changePlane(0).initWrapper(StImagePlane::ImgRGBA, myFrame.getPlane(0),
                                   myCodecCtx->width, myCodecCtx->height,
                                   myFrame.getLineSize(0));
    } else if(myCodecCtx->pix_fmt == stAV::PIX_FMT::BGRA32) {
        setColorModel(StImage::ImgColor_RGBA);
        changePlane(0).initWrapper(StImagePlane::ImgBGRA, myFrame.getPlane(0),
                                   myCodecCtx->width, myCodecCtx->height,
                                   myFrame.getLineSize(0));
    } else if(myCodecCtx->pix_fmt == stAV::PIX_FMT::GRAY8) {
        setColorModel(StImage::ImgColor_GRAY);
        changePlane(0).initWrapper(StImagePlane::ImgGray, myFrame.getPlane(0),
                                   myCodecCtx->width, myCodecCtx->height,
                                   myFrame.getLineSize(0));
    } else if(myCodecCtx->pix_fmt == stAV::PIX_FMT::GRAY16) {
        setColorModel(StImage::ImgColor_GRAY);
        changePlane(0).initWrapper(StImagePlane::ImgGray16, myFrame.getPlane(0),
                                   myCodecCtx->width, myCodecCtx->height,
                                   myFrame.getLineSize(0));
    } else if(myCodecCtx->pix_fmt == stAV::PIX_FMT::RGB48) {
        setColorModel(StImage::ImgColor_RGB);
        changePlane(0).initWrapper(StImagePlane::ImgRGB48, myFrame.getPlane(0),
                                   myCodecCtx->width, myCodecCtx->height,
                                   myFrame.getLineSize(0));
    } else if(myCodecCtx->pix_fmt == stAV::PIX_FMT::RGBA64) {
        setColorModel(StImage::ImgColor_RGBA);
        changePlane(0).initWrapper(StImagePlane::ImgRGBA64, myFrame.getPlane(0),
                                   myCodecCtx->width, myCodecCtx->height,
                                   myFrame.getLineSize(0));
    } else if(stAV::isFormatYUVPlanar(myCodecCtx, aDimsYUV)) {
    #if(LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(52, 29, 0))
        if(myCodecCtx->color_range == AVCOL_RANGE_JPEG) {
            aDimsYUV.isFullScale = true;
        }
    #endif
        setColorModel(StImage::ImgColor_YUV);
        setColorScale(aDimsYUV.isFullScale ? StImage::ImgScale_Full : StImage::ImgScale_Mpeg);
        StImagePlane::ImgFormat aPlaneFrmt = StImagePlane::ImgGray;
        if(aDimsYUV.bitsPerComp == 9) {
            aPlaneFrmt = StImagePlane::ImgGray16;
            setColorScale(aDimsYUV.isFullScale ? StImage::ImgScale_Jpeg9  : StImage::ImgScale_Mpeg9);
        } else if(aDimsYUV.bitsPerComp == 10) {
            aPlaneFrmt = StImagePlane::ImgGray16;
            setColorScale(aDimsYUV.isFullScale ? StImage::ImgScale_Jpeg10 : StImage::ImgScale_Mpeg10);
        } else if(aDimsYUV.bitsPerComp == 16) {
            aPlaneFrmt = StImagePlane::ImgGray16;
        }

        changePlane(0).initWrapper(aPlaneFrmt, myFrame.getPlane(0),
                                   size_t(aDimsYUV.widthY), size_t(aDimsYUV.heightY), myFrame.getLineSize(0));
        changePlane(1).initWrapper(aPlaneFrmt, myFrame.getPlane(1),
                                   size_t(aDimsYUV.widthU), size_t(aDimsYUV.heightU), myFrame.getLineSize(1));
        changePlane(2).initWrapper(aPlaneFrmt, myFrame.getPlane(2),
                                   size_t(aDimsYUV.widthV), size_t(aDimsYUV.heightV), myFrame.getLineSize(2));
    } else {
        ///ST_DEBUG_LOG("StAVImage, perform conversion from Pixel format '" + avcodec_get_pix_fmt_name(myCodecCtx->pix_fmt) + "' to RGB");
        // initialize software scaler/converter
        SwsContext* pToRgbCtx = sws_getContext(myCodecCtx->width, myCodecCtx->height, myCodecCtx->pix_fmt,    // source
                                               myCodecCtx->width, myCodecCtx->height, stAV::PIX_FMT::RGB24, // destination
                                               SWS_BICUBIC, NULL, NULL, NULL);
        if(pToRgbCtx == NULL) {
            setState("SWScale library, failed to create SWScaler context");
            close();
            return false;
        }

        // initialize additional buffer for converted RGB data
        setColorModel(StImage::ImgColor_RGB);
        changePlane(0).initTrash(StImagePlane::ImgRGB,
                                 myCodecCtx->width, myCodecCtx->height);

        uint8_t* rgbData[4]; stMemZero(rgbData,     sizeof(rgbData));
        int  rgbLinesize[4]; stMemZero(rgbLinesize, sizeof(rgbLinesize));
        rgbData[0]     = changePlane(0).changeData();
        rgbLinesize[0] = (int )changePlane(0).getSizeRowBytes();

        sws_scale(pToRgbCtx,
                  myFrame.Frame->data, myFrame.Frame->linesize,
                  0, myCodecCtx->height,
                  rgbData, rgbLinesize);
        // reset original data
        closeAvCtx();

        sws_freeContext(pToRgbCtx);
    }

    // set debug information
    StString aDummy, aFileName;
    StFileNode::getFolderAndFile(theFilePath, aDummy, aFileName);
    setState(StString("AVCodec library, loaded image '") + aFileName + "' " + getDescription());

    // we should not close the file because decoded image data is in codec context cache
    return true;
}
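
The fallback branch of load() (the sws_getContext()/sws_scale() path) is the generic way to convert any decoded pixel format to packed RGB. A self-contained sketch, with buffer ownership simplified via av_image_alloc() instead of the class's own plane:

#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>

/* Standalone sketch of the conversion path above: decoded frame -> RGB24. */
static int convert_to_rgb24(const AVFrame *src)
{
    uint8_t *dst_data[4];
    int      dst_linesize[4];
    int ret = av_image_alloc(dst_data, dst_linesize,
                             src->width, src->height, AV_PIX_FMT_RGB24, 1);
    if (ret < 0)
        return ret;

    struct SwsContext *sws = sws_getContext(src->width, src->height,
                                            (enum AVPixelFormat)src->format,
                                            src->width, src->height,
                                            AV_PIX_FMT_RGB24,
                                            SWS_BICUBIC, NULL, NULL, NULL);
    if (!sws) {
        av_freep(&dst_data[0]);
        return -1;
    }

    sws_scale(sws, (const uint8_t *const *)src->data, src->linesize,
              0, src->height, dst_data, dst_linesize);

    sws_freeContext(sws);
    av_freep(&dst_data[0]);
    return 0;
}
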

bool StAVImage::save(const StString& theFilePath,
                     ImageType       theImageType,
                     StFormat        theSrcFormat) {
    close();
    setState();
    if(isNull()) {
        return false;
    }

    PixelFormat aPFormatAV = (PixelFormat )getAVPixelFormat(*this);
    StImage anImage;
    switch(theImageType) {
        case ST_TYPE_PNG:
        case ST_TYPE_PNS: {
            myCodec = avcodec_find_encoder_by_name("png");
            if(myCodec == NULL) {
                setState("AVCodec library, video codec 'png' not found");
                close();
                return false;
            }
            if(aPFormatAV == stAV::PIX_FMT::RGB24
            || aPFormatAV == stAV::PIX_FMT::RGBA32
            || aPFormatAV == stAV::PIX_FMT::GRAY8) {
                anImage.initWrapper(*this);
            } else {
                // convert to compatible pixel format
                anImage.changePlane().initTrash(StImagePlane::ImgRGB, getSizeX(), getSizeY(), getAligned(getSizeX() * 3));
                PixelFormat aPFrmtTarget = stAV::PIX_FMT::RGB24;
                if(!convert(*this,   aPFormatAV,
                            anImage, aPFrmtTarget)) {
                    setState("SWScale library, failed to create SWScaler context");
                    close();
                    return false;
                }
                aPFormatAV = aPFrmtTarget;
            }
        #if(LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(53, 8, 0))
            myCodecCtx = avcodec_alloc_context3(myCodec);
        #else
            myCodecCtx = avcodec_alloc_context();
        #endif

            // setup encoder
            myCodecCtx->pix_fmt = aPFormatAV;
            myCodecCtx->width   = (int )anImage.getSizeX();
            myCodecCtx->height  = (int )anImage.getSizeY();
            myCodecCtx->compression_level = 9; // 0..9
            break;
        }
        case ST_TYPE_JPEG:
        case ST_TYPE_MPO:
        case ST_TYPE_JPS: {
            myCodec = avcodec_find_encoder_by_name("mjpeg");
            if(myCodec == NULL) {
                setState("AVCodec library, video codec 'mjpeg' not found");
                close();
                return false;
            }

            if(aPFormatAV == stAV::PIX_FMT::YUVJ420P
            || aPFormatAV == stAV::PIX_FMT::YUVJ422P
            //|| aPFormatAV == stAV::PIX_FMT::YUVJ444P not supported by FFmpeg... yet?
            //|| aPFormatAV == stAV::PIX_FMT::YUVJ440P
               ) {
                anImage.initWrapper(*this);
            } else {
                // convert to compatible pixel format
                PixelFormat aPFrmtTarget = aPFormatAV == stAV::PIX_FMT::YUV420P ? stAV::PIX_FMT::YUVJ420P : stAV::PIX_FMT::YUVJ422P;
                anImage.setColorModel(StImage::ImgColor_YUV);
                anImage.setColorScale(StImage::ImgScale_Mpeg);
                anImage.changePlane(0).initTrash(StImagePlane::ImgGray, getSizeX(), getSizeY(), getAligned(getSizeX()));
                stMemSet(anImage.changePlane(0).changeData(), '\0', anImage.getPlane(0).getSizeBytes());
                anImage.changePlane(1).initTrash(StImagePlane::ImgGray, getSizeX(), getSizeY(), getAligned(getSizeX()));
                stMemSet(anImage.changePlane(1).changeData(), '\0', anImage.getPlane(1).getSizeBytes());
                anImage.changePlane(2).initTrash(StImagePlane::ImgGray, getSizeX(), getSizeY(), getAligned(getSizeX()));
                stMemSet(anImage.changePlane(2).changeData(), '\0', anImage.getPlane(2).getSizeBytes());
                if(!convert(*this,   aPFormatAV,
                            anImage, aPFrmtTarget)) {
                    setState("SWScale library, failed to create SWScaler context");
                    close();
                    return false;
                }
                aPFormatAV = aPFrmtTarget;
            }

        #if(LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(53, 8, 0))
            myCodecCtx = avcodec_alloc_context3(myCodec);
        #else
            myCodecCtx = avcodec_alloc_context();
        #endif
            myCodecCtx->pix_fmt = aPFormatAV;
            myCodecCtx->width   = (int )anImage.getSizeX();
            myCodecCtx->height  = (int )anImage.getSizeY();
            myCodecCtx->time_base.num = 1;
            myCodecCtx->time_base.den = 1;
            myCodecCtx->qmin = myCodecCtx->qmax = 5; // quality factor - lesser is better
            break;
        }
        case ST_TYPE_NONE:
        default:
            close();
            return false;
    }

    // open VIDEO codec
#if(LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(53, 8, 0))
    if(avcodec_open2(myCodecCtx, myCodec, NULL) < 0) {
#else
    if(avcodec_open(myCodecCtx, myCodec) < 0) {
#endif
        setState("AVCodec library, could not open video codec");
        close();
        return false;
    }

    // wrap own data into AVFrame
    myFrame.Frame->format = myCodecCtx->pix_fmt;
    myFrame.Frame->width  = myCodecCtx->width;
    myFrame.Frame->height = myCodecCtx->height;
    fillPointersAV(anImage, myFrame.Frame->data, myFrame.Frame->linesize);

#ifdef ST_AV_NEWSTEREO
    bool isReversed = false;
    AVStereo3DType anAvStereoType = stAV::stereo3dStToAv(theSrcFormat, isReversed);
    if(anAvStereoType != (AVStereo3DType )-1) {
        AVStereo3D* aStereo = av_stereo3d_create_side_data(myFrame.Frame);
        if(aStereo != NULL) {
            aStereo->type = anAvStereoType;
            if(isReversed) {
                aStereo->flags |= AV_STEREO3D_FLAG_INVERT;
            }
        }
    }
#endif

    StJpegParser aRawFile(theFilePath);
    if(!aRawFile.openFile(StRawFile::WRITE)) {
        setState("Can not open the file for writing");
        close();
        return false;
    }

    // allocate the buffer, large enough (stupid formula copied from ffmpeg.c)
    int aBuffSize = int(getSizeX() * getSizeY() * 10);
    aRawFile.initBuffer(aBuffSize);

    // encode the image
    StAVPacket aPacket;
    aPacket.getAVpkt()->data = (uint8_t* )aRawFile.changeBuffer();
    aPacket.getAVpkt()->size = aBuffSize;
#if(LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(54, 2, 100))
    int isGotPacket = 0;
    int anEncSize   = avcodec_encode_video2(myCodecCtx, aPacket.getAVpkt(), myFrame.Frame, &isGotPacket);
    if(anEncSize == 0 && isGotPacket != 0) {
        anEncSize = aPacket.getSize();
    }
#else
    int anEncSize = avcodec_encode_video(myCodecCtx, aPacket.changeData(), aPacket.getSize(), myFrame.Frame);
#endif
    if(anEncSize <= 0) {
        setState("AVCodec library, fail to encode the image");
        close();
        return false;
    }
    aRawFile.setDataSize((size_t )anEncSize);

    // save metadata when possible
    if(theImageType == ST_TYPE_JPEG
    || theImageType == ST_TYPE_JPS) {
        if(aRawFile.parse()) {
            if(theSrcFormat != StFormat_AUTO) {
                aRawFile.setupJps(theSrcFormat);
            }
        } else {
            ST_ERROR_LOG("AVCodec library, created JPEG cannot be parsed!");
        }
    }

    // store current content
    aRawFile.writeFile();
    // and finally close the file handle
    aRawFile.closeFile();

    close();

    // set debug information
    StString aDummy, aFileName;
    StFileNode::getFolderAndFile(theFilePath, aDummy, aFileName);
    setState(StString("AVCodec library, saved image '") + aFileName + "' " + getDescription());

    return true;
}
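
Note that avcodec_encode_video2(), used in the version-gated block above, is deprecated in current FFmpeg; on libavcodec 57.48.100 (FFmpeg 3.1) and newer the same step would typically use the send/receive API. A minimal sketch, assuming an already-opened encoder context and a filled frame:

#include <libavcodec/avcodec.h>

/* Sketch of the modern replacement for avcodec_encode_video2(). */
static int encode_one(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt)
{
    int ret = avcodec_send_frame(ctx, frame); /* frame == NULL flushes */
    if (ret < 0)
        return ret;
    /* AVERROR(EAGAIN) here means the encoder needs more input first. */
    return avcodec_receive_packet(ctx, pkt);
}
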
Example #6
STDMETHODIMP CDecAvcodec::Decode(const BYTE *buffer, int buflen, REFERENCE_TIME rtStartIn, REFERENCE_TIME rtStopIn, BOOL bSyncPoint, BOOL bDiscontinuity)
{
  CheckPointer(m_pAVCtx, E_UNEXPECTED);

  int     got_picture = 0;
  int     used_bytes  = 0;
  BOOL    bFlush = (buffer == nullptr);
  BOOL    bEndOfSequence = FALSE;

  AVPacket avpkt;
  av_init_packet(&avpkt);

  if (m_pAVCtx->active_thread_type & FF_THREAD_FRAME) {
    if (!m_bFFReordering) {
      m_tcThreadBuffer[m_CurrentThread].rtStart = rtStartIn;
      m_tcThreadBuffer[m_CurrentThread].rtStop  = rtStopIn;
    }

    m_CurrentThread = (m_CurrentThread + 1) % m_pAVCtx->thread_count;
  } else if (m_bBFrameDelay) {
    m_tcBFrameDelay[m_nBFramePos].rtStart = rtStartIn;
    m_tcBFrameDelay[m_nBFramePos].rtStop = rtStopIn;
    m_nBFramePos = !m_nBFramePos;
  }

  uint8_t *pDataBuffer = nullptr;
  if (!bFlush && buflen > 0) {
    if (!m_bInputPadded && (!(m_pAVCtx->active_thread_type & FF_THREAD_FRAME) || m_pParser)) {
      // Copy bitstream into temporary buffer to ensure overread protection
      // Verify buffer size
      if (buflen > m_nFFBufferSize) {
        m_nFFBufferSize	= buflen;
        m_pFFBuffer = (BYTE *)av_realloc_f(m_pFFBuffer, m_nFFBufferSize + FF_INPUT_BUFFER_PADDING_SIZE, 1);
        if (!m_pFFBuffer) {
          m_nFFBufferSize = 0;
          return E_OUTOFMEMORY;
        }
      }
      
      memcpy(m_pFFBuffer, buffer, buflen);
      memset(m_pFFBuffer+buflen, 0, FF_INPUT_BUFFER_PADDING_SIZE);
      pDataBuffer = m_pFFBuffer;
    } else {
      pDataBuffer = (uint8_t *)buffer;
    }

    if (m_nCodecId == AV_CODEC_ID_VP8 && m_bWaitingForKeyFrame) {
      if (!(pDataBuffer[0] & 1)) {
        DbgLog((LOG_TRACE, 10, L"::Decode(): Found VP8 key-frame, resuming decoding"));
        m_bWaitingForKeyFrame = FALSE;
      } else {
        return S_OK;
      }
    }
  }

  while (buflen > 0 || bFlush) {
    REFERENCE_TIME rtStart = rtStartIn, rtStop = rtStopIn;

    if (!bFlush) {
      avpkt.data = pDataBuffer;
      avpkt.size = buflen;
      avpkt.pts = rtStartIn;
      if (rtStartIn != AV_NOPTS_VALUE && rtStopIn != AV_NOPTS_VALUE)
        avpkt.duration = (int)(rtStopIn - rtStartIn);
      else
        avpkt.duration = 0;
      avpkt.flags = AV_PKT_FLAG_KEY;

      if (m_bHasPalette) {
        m_bHasPalette = FALSE;
        uint32_t *pal = (uint32_t *)av_packet_new_side_data(&avpkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
        int pal_size = FFMIN((1 << m_pAVCtx->bits_per_coded_sample) << 2, m_pAVCtx->extradata_size);
        uint8_t *pal_src = m_pAVCtx->extradata + m_pAVCtx->extradata_size - pal_size;

        for (int i = 0; i < pal_size/4; i++)
          pal[i] = 0xFF<<24 | AV_RL32(pal_src+4*i);
      }
    } else {
      avpkt.data = nullptr;
      avpkt.size = 0;
    }

    // Parse the data if a parser is present
    // This is mandatory for MPEG-1/2
    if (m_pParser) {
      BYTE *pOut = nullptr;
      int pOut_size = 0;

      used_bytes = av_parser_parse2(m_pParser, m_pAVCtx, &pOut, &pOut_size, avpkt.data, avpkt.size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);

      if (used_bytes == 0 && pOut_size == 0 && !bFlush) {
        DbgLog((LOG_TRACE, 50, L"::Decode() - could not process buffer, starving?"));
        break;
      } else if (used_bytes > 0) {
        buflen -= used_bytes;
        pDataBuffer += used_bytes;
      }

      // Update start time cache
      // If more data was read then output, update the cache (incomplete frame)
      // If output is bigger, a frame was completed, update the actual rtStart with the cached value, and then overwrite the cache
      if (used_bytes > pOut_size) {
        if (rtStartIn != AV_NOPTS_VALUE)
          m_rtStartCache = rtStartIn;
      } else if (used_bytes == pOut_size || ((used_bytes + 9) == pOut_size)) {
        // Why +9 above?
        // Well, apparently there are some broken MKV muxers that like to mux the MPEG-2 PICTURE_START_CODE block (which is 9 bytes) in the package with the previous frame
        // This would cause the frame timestamps to be delayed by one frame exactly, and cause timestamp reordering to go wrong.
        // So instead of failing on those samples, let's just assume that 9 bytes are that case exactly.
        m_rtStartCache = rtStartIn = AV_NOPTS_VALUE;
      } else if (pOut_size > used_bytes) {
        rtStart = m_rtStartCache;
        m_rtStartCache = rtStartIn;
        // The value was used once, don't use it for multiple frames, that ends up in weird timings
        rtStartIn = AV_NOPTS_VALUE;
      }

      if (pOut_size > 0 || bFlush) {

        if (pOut && pOut_size > 0) {
          if (pOut_size > m_nFFBufferSize2) {
            m_nFFBufferSize2	= pOut_size;
            m_pFFBuffer2 = (BYTE *)av_realloc_f(m_pFFBuffer2, m_nFFBufferSize2 + FF_INPUT_BUFFER_PADDING_SIZE, 1);
            if (!m_pFFBuffer2) {
              m_nFFBufferSize2 = 0;
              return E_OUTOFMEMORY;
            }
          }
          memcpy(m_pFFBuffer2, pOut, pOut_size);
          memset(m_pFFBuffer2+pOut_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);

          avpkt.data = m_pFFBuffer2;
          avpkt.size = pOut_size;
          avpkt.pts = rtStart;
          avpkt.duration = 0;

          const uint8_t *eosmarker = CheckForEndOfSequence(m_nCodecId, avpkt.data, avpkt.size, &m_MpegParserState);
          if (eosmarker) {
            bEndOfSequence = TRUE;
          }
        } else {
          avpkt.data = nullptr;
          avpkt.size = 0;
        }

        int ret2 = avcodec_decode_video2 (m_pAVCtx, m_pFrame, &got_picture, &avpkt);
        if (ret2 < 0) {
          DbgLog((LOG_TRACE, 50, L"::Decode() - decoding failed despite successfull parsing"));
          got_picture = 0;
        }
      } else {
        got_picture = 0;
      }
    } else {
      used_bytes = avcodec_decode_video2 (m_pAVCtx, m_pFrame, &got_picture, &avpkt);
      buflen = 0;
    }

    if (FAILED(PostDecode())) {
      av_frame_unref(m_pFrame);
      return E_FAIL;
    }

    // Decoding of this frame failed ... oh well!
    if (used_bytes < 0) {
      av_frame_unref(m_pFrame);
      return S_OK;
    }

    // Judge frame usability
    // This determines if a frame is artifact free and can be delivered.
    if (m_bResumeAtKeyFrame) {
      if (m_bWaitingForKeyFrame && got_picture) {
        if (m_pFrame->key_frame) {
          DbgLog((LOG_TRACE, 50, L"::Decode() - Found Key-Frame, resuming decoding at %I64d", m_pFrame->pkt_pts));
          m_bWaitingForKeyFrame = FALSE;
        } else {
          got_picture = 0;
        }
      }
    }

    // Handle B-frame delay for frame threading codecs
    if ((m_pAVCtx->active_thread_type & FF_THREAD_FRAME) && m_bBFrameDelay) {
      m_tcBFrameDelay[m_nBFramePos] = m_tcThreadBuffer[m_CurrentThread];
      m_nBFramePos = !m_nBFramePos;
    }

    if (!got_picture || !m_pFrame->data[0]) {
      if (!avpkt.size)
        bFlush = FALSE; // End flushing, no more frames
      av_frame_unref(m_pFrame);
      continue;
    }

    ///////////////////////////////////////////////////////////////////////////////////////////////
    // Determine the proper timestamps for the frame, based on different possible flags.
    ///////////////////////////////////////////////////////////////////////////////////////////////
    if (m_bFFReordering) {
      rtStart = m_pFrame->pkt_pts;
      if (m_pFrame->pkt_duration)
        rtStop = m_pFrame->pkt_pts + m_pFrame->pkt_duration;
      else
        rtStop = AV_NOPTS_VALUE;
    } else if (m_bBFrameDelay && m_pAVCtx->has_b_frames) {
      rtStart = m_tcBFrameDelay[m_nBFramePos].rtStart;
      rtStop  = m_tcBFrameDelay[m_nBFramePos].rtStop;
    } else if (m_pAVCtx->active_thread_type & FF_THREAD_FRAME) {
      unsigned index = m_CurrentThread;
      rtStart = m_tcThreadBuffer[index].rtStart;
      rtStop  = m_tcThreadBuffer[index].rtStop;
    }

    if (m_bRVDropBFrameTimings && m_pFrame->pict_type == AV_PICTURE_TYPE_B) {
      rtStart = AV_NOPTS_VALUE;
    }

    if (m_bCalculateStopTime)
      rtStop = AV_NOPTS_VALUE;

    ///////////////////////////////////////////////////////////////////////////////////////////////
    // All required values collected, deliver the frame
    ///////////////////////////////////////////////////////////////////////////////////////////////
    LAVFrame *pOutFrame = nullptr;
    AllocateFrame(&pOutFrame);

    AVRational display_aspect_ratio;
    int64_t num = (int64_t)m_pFrame->sample_aspect_ratio.num * m_pFrame->width;
    int64_t den = (int64_t)m_pFrame->sample_aspect_ratio.den * m_pFrame->height;
    av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, num, den, INT_MAX);

    pOutFrame->width        = m_pFrame->width;
    pOutFrame->height       = m_pFrame->height;
    pOutFrame->aspect_ratio = display_aspect_ratio;
    pOutFrame->repeat       = m_pFrame->repeat_pict;
    pOutFrame->key_frame    = m_pFrame->key_frame;
    pOutFrame->frame_type   = av_get_picture_type_char(m_pFrame->pict_type);
    pOutFrame->ext_format   = GetDXVA2ExtendedFlags(m_pAVCtx, m_pFrame);

    if (m_pFrame->interlaced_frame || (!m_pAVCtx->progressive_sequence && (m_nCodecId == AV_CODEC_ID_H264 || m_nCodecId == AV_CODEC_ID_MPEG2VIDEO)))
      m_iInterlaced = 1;
    else if (m_pAVCtx->progressive_sequence)
      m_iInterlaced = 0;

    if ((m_nCodecId == AV_CODEC_ID_H264 || m_nCodecId == AV_CODEC_ID_MPEG2VIDEO) && m_pFrame->repeat_pict)
      m_nSoftTelecine = 2;
    else if (m_nSoftTelecine > 0)
      m_nSoftTelecine--;

    // Don't apply aggressive deinterlacing to content that looks soft-telecined, as it would destroy the content
    bool bAggressiveFlag    = (m_iInterlaced == 1 && m_pSettings->GetDeinterlacingMode() == DeintMode_Aggressive) && !m_nSoftTelecine;

    pOutFrame->interlaced   = (m_pFrame->interlaced_frame || bAggressiveFlag || m_pSettings->GetDeinterlacingMode() == DeintMode_Force) && !(m_pSettings->GetDeinterlacingMode() == DeintMode_Disable);

    LAVDeintFieldOrder fo   = m_pSettings->GetDeintFieldOrder();
    pOutFrame->tff          = (fo == DeintFieldOrder_Auto) ? m_pFrame->top_field_first : (fo == DeintFieldOrder_TopFieldFirst);

    pOutFrame->rtStart      = rtStart;
    pOutFrame->rtStop       = rtStop;

    PixelFormatMapping map  = getPixFmtMapping((AVPixelFormat)m_pFrame->format);
    pOutFrame->format       = map.lavpixfmt;
    pOutFrame->bpp          = map.bpp;

    if (m_nCodecId == AV_CODEC_ID_MPEG2VIDEO || m_nCodecId == AV_CODEC_ID_MPEG1VIDEO)
      pOutFrame->avgFrameDuration = GetFrameDuration();

    AVFrameSideData * sdHDR = av_frame_get_side_data(m_pFrame, AV_FRAME_DATA_HDR_MASTERING_INFO);
    if (sdHDR) {
      if (sdHDR->size == 24) {
        MediaSideDataHDR * hdr = (MediaSideDataHDR *)AddLAVFrameSideData(pOutFrame, IID_MediaSideDataHDR, sizeof(MediaSideDataHDR));
        if (hdr) {
          CByteParser hdrParser(sdHDR->data, sdHDR->size);
          for (int i = 0; i < 3; i++)
          {
            hdr->display_primaries_x[i] = hdrParser.BitRead(16) * 0.00002;
            hdr->display_primaries_y[i] = hdrParser.BitRead(16) * 0.00002;
          }
          hdr->white_point_x = hdrParser.BitRead(16) * 0.00002;
          hdr->white_point_y = hdrParser.BitRead(16) * 0.00002;
          hdr->max_display_mastering_luminance = hdrParser.BitRead(32) * 0.0001;
          hdr->min_display_mastering_luminance = hdrParser.BitRead(32) * 0.0001;
        }
      }
      else {
        DbgLog((LOG_TRACE, 10, L"::Decode(): Found HDR data of an unexpected size (%d)", sdHDR->size));
      }
    }

    if (map.conversion) {
      ConvertPixFmt(m_pFrame, pOutFrame);
    } else {
      AVFrame *pFrameRef = av_frame_alloc();
      av_frame_ref(pFrameRef, m_pFrame);

      for (int i = 0; i < 4; i++) {
        pOutFrame->data[i]   = pFrameRef->data[i];
        pOutFrame->stride[i] = pFrameRef->linesize[i];
      }

      pOutFrame->priv_data = pFrameRef;
      pOutFrame->destruct = lav_avframe_free;

      // Check alignment on rawvideo, which can be off depending on the source file
      if (m_nCodecId == AV_CODEC_ID_RAWVIDEO) {
        for (int i = 0; i < 4; i++) {
          if ((intptr_t)pOutFrame->data[i] % 16u || pOutFrame->stride[i] % 16u) {
            // copy the frame; it's not aligned properly and would crash later
            CopyLAVFrameInPlace(pOutFrame);
            break;
          }
        }
      }
    }

    if (bEndOfSequence)
      pOutFrame->flags |= LAV_FRAME_FLAG_END_OF_SEQUENCE;

    if (pOutFrame->format == LAVPixFmt_DXVA2) {
      pOutFrame->data[0] = m_pFrame->data[4];
      HandleDXVA2Frame(pOutFrame);
    } else {
      Deliver(pOutFrame);
    }

    if (bEndOfSequence) {
      bEndOfSequence = FALSE;
      if (pOutFrame->format == LAVPixFmt_DXVA2) {
        HandleDXVA2Frame(m_pCallback->GetFlushFrame());
      } else {
        Deliver(m_pCallback->GetFlushFrame());
      }
    }

    if (bFlush) {
      m_CurrentThread = (m_CurrentThread + 1) % m_pAVCtx->thread_count;
    }
    av_frame_unref(m_pFrame);
  }

  return S_OK;
}
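
The av_parser_parse2() loop above (mandatory for MPEG-1/2, as the comment notes) can be reduced to its essentials: feed raw bytes through the parser until it emits whole frames. A sketch, assuming parser and avctx were created with av_parser_init() and avcodec_alloc_context3():

#include <libavcodec/avcodec.h>

/* Reduced sketch of the parsing loop above. */
static void parse_all(AVCodecParserContext *parser, AVCodecContext *avctx,
                      const uint8_t *data, int size)
{
    while (size > 0) {
        uint8_t *out = NULL;
        int out_size = 0;
        int used = av_parser_parse2(parser, avctx, &out, &out_size,
                                    data, size,
                                    AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
        if (used < 0)
            break;
        data += used;
        size -= used;
        if (out_size > 0) {
            /* out/out_size now hold one complete frame to decode. */
        }
    }
}
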