double movie_player::get_presentation_time_for_frame(AVFrame* frame, int streamIndex) const {
    int64_t pts;

    // Pick the best available timestamp for this frame, depending on which
    // libav/FFmpeg version is in use.
#ifdef CORSIX_TH_USE_LIBAV
    pts = frame->pts;
    if (pts == AV_NOPTS_VALUE) {
        pts = frame->pkt_dts;
    }
#else
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54, 18, 100)
    pts = *(int64_t*)av_opt_ptr(avcodec_get_frame_class(), frame, "best_effort_timestamp");
#elif LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 18, 100)
    pts = av_frame_get_best_effort_timestamp(frame);
#else
    pts = frame->best_effort_timestamp;
#endif // LIBAVCODEC_VERSION_INT
#endif // CORSIX_TH_USE_LIBAV

    if (pts == AV_NOPTS_VALUE) {
        pts = 0;
    }

    // Scale from stream time_base units to seconds.
    return pts * av_q2d(format_context->streams[streamIndex]->time_base);
}
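A minimal sketch of how the returned value might pace playback, assuming a hypothetical caller: get_elapsed_seconds() and render_frame() are illustrative names, not part of the original movie_player class, and SDL_Delay assumes the SDL dependency CorsixTH already uses.

void movie_player::display_next_frame_sketch(AVFrame* frame, int stream_index) {
    double frame_time = get_presentation_time_for_frame(frame, stream_index);
    double now = get_elapsed_seconds(); // hypothetical playback clock, in seconds

    if (frame_time > now) {
        // Frame is early: wait until its presentation time arrives.
        SDL_Delay(static_cast<Uint32>((frame_time - now) * 1000.0));
    }
    // Frame is due (or late): hand it to the renderer immediately.
    render_frame(frame); // hypothetical renderer hand-off
}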
int THMovie::getVideoFrame(AVFrame *pFrame, int64_t *piPts)
{
    int iGotPicture = 0;
    int iError;

    // Blocking pull; returns nullptr when the queue is closed.
    AVPacket *pPacket = m_pVideoQueue->pull(true);
    if(pPacket == nullptr)
    {
        return -1;
    }

    // The flush packet is a sentinel shared with the demuxer thread.
    if(pPacket->data == m_flushPacket->data)
    {
        //TODO: Flush
        return 0;
    }

    iError = avcodec_decode_video2(m_pVideoCodecContext, pFrame, &iGotPicture, pPacket);
    av_packet_unref(pPacket);
    av_free(pPacket);

    if(iError < 0)
    {
        return 0;
    }

    if(iGotPicture)
    {
        iError = 1;
#ifdef CORSIX_TH_USE_LIBAV
        *piPts = pFrame->pts;
        if (*piPts == AV_NOPTS_VALUE)
        {
            *piPts = pFrame->pkt_dts;
        }
#else
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54, 18, 100)
        *piPts = *(int64_t*)av_opt_ptr(avcodec_get_frame_class(), pFrame, "best_effort_timestamp");
#else
        *piPts = av_frame_get_best_effort_timestamp(pFrame);
#endif // LIBAVCODEC_VERSION_INT
#endif // CORSIX_TH_USE_LIBAV
        if(*piPts == AV_NOPTS_VALUE)
        {
            *piPts = 0;
        }
        return iError;
    }

    return 0;
}
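A hedged sketch of a consumer loop for getVideoFrame(), distinguishing its three return cases; m_fAborting and queuePicture() are hypothetical names for the stop flag and the renderer hand-off, not part of the original THMovie class, and av_frame_alloc() assumes a post-2013 libavutil.

int THMovie::runVideoThreadSketch()
{
    AVFrame *pFrame = av_frame_alloc();
    int64_t iPts = 0;

    while(!m_fAborting) // hypothetical stop flag
    {
        int iRes = getVideoFrame(pFrame, &iPts);
        if(iRes < 0) { break; }      // queue closed, stop the thread
        if(iRes == 0) { continue; }  // flush sentinel or no picture yet

        queuePicture(pFrame, iPts);  // hypothetical hand-off to the renderer
        av_frame_unref(pFrame);      // release references before reuse
    }

    av_frame_free(&pFrame);
    return 0;
}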
/* "user interface" functions */
static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
{
    char buf[256];
    int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
    AVStream *st = ic->streams[i];
    AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
    char *separator = ic->dump_separator;
    char **codec_separator = av_opt_ptr(st->codec->av_class, st->codec, "dump_separator");
    int use_format_separator = !*codec_separator;

    if (use_format_separator)
        *codec_separator = av_strdup(separator);
    avcodec_string(buf, sizeof(buf), st->codec, is_output);
    if (use_format_separator)
        av_freep(codec_separator);

    av_log(NULL, AV_LOG_INFO, "    Stream #%d:%d", index, i);

    /* the pid is important information, so we display it */
    /* XXX: add a generic system */
    if (flags & AVFMT_SHOW_IDS)
        av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
    if (lang)
        av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
    av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames,
           st->time_base.num, st->time_base.den);
    av_log(NULL, AV_LOG_INFO, ": %s", buf);

    if (st->sample_aspect_ratio.num && // default
        av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
        AVRational display_aspect_ratio;
        av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
                  st->codec->width  * st->sample_aspect_ratio.num,
                  st->codec->height * st->sample_aspect_ratio.den,
                  1024 * 1024);
        av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
               st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
               display_aspect_ratio.num, display_aspect_ratio.den);
    }

    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        int fps = st->avg_frame_rate.den && st->avg_frame_rate.num;
        int tbr = st->r_frame_rate.den   && st->r_frame_rate.num;
        int tbn = st->time_base.den      && st->time_base.num;
        int tbc = st->codec->time_base.den && st->codec->time_base.num;

        if (fps || tbr || tbn || tbc)
            av_log(NULL, AV_LOG_INFO, "%s", separator);

        if (fps)
            print_fps(av_q2d(st->avg_frame_rate), tbr || tbn || tbc ? "fps, " : "fps");
        if (tbr)
            print_fps(av_q2d(st->r_frame_rate), tbn || tbc ? "tbr, " : "tbr");
        if (tbn)
            print_fps(1 / av_q2d(st->time_base), tbc ? "tbn, " : "tbn");
        if (tbc)
            print_fps(1 / av_q2d(st->codec->time_base), "tbc");
    }

    if (st->disposition & AV_DISPOSITION_DEFAULT)
        av_log(NULL, AV_LOG_INFO, " (default)");
    if (st->disposition & AV_DISPOSITION_DUB)
        av_log(NULL, AV_LOG_INFO, " (dub)");
    if (st->disposition & AV_DISPOSITION_ORIGINAL)
        av_log(NULL, AV_LOG_INFO, " (original)");
    if (st->disposition & AV_DISPOSITION_COMMENT)
        av_log(NULL, AV_LOG_INFO, " (comment)");
    if (st->disposition & AV_DISPOSITION_LYRICS)
        av_log(NULL, AV_LOG_INFO, " (lyrics)");
    if (st->disposition & AV_DISPOSITION_KARAOKE)
        av_log(NULL, AV_LOG_INFO, " (karaoke)");
    if (st->disposition & AV_DISPOSITION_FORCED)
        av_log(NULL, AV_LOG_INFO, " (forced)");
    if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
        av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
    if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
        av_log(NULL, AV_LOG_INFO, " (visual impaired)");
    if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
        av_log(NULL, AV_LOG_INFO, " (clean effects)");
    av_log(NULL, AV_LOG_INFO, "\n");

    dump_metadata(NULL, st->metadata, "    ");
    dump_sidedata(NULL, st, "    ");
}
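dump_stream_format() is static to libavformat and is normally reached through the public av_dump_format() entry point, which invokes it once per printed stream. A minimal caller, with error handling trimmed, might look like this (the input path is taken from argv[1]):

#include <libavformat/avformat.h>

int main(int argc, char **argv)
{
    AVFormatContext *fmt_ctx = NULL;

    if (argc < 2)
        return 1;

    av_register_all(); /* required on the pre-4.0 libavformat used above */

    if (avformat_open_input(&fmt_ctx, argv[1], NULL, NULL) < 0)
        return 1;
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        avformat_close_input(&fmt_ctx);
        return 1;
    }

    /* Prints one "Stream #0:N ..." line per stream via dump_stream_format(). */
    av_dump_format(fmt_ctx, 0, argv[1], 0);

    avformat_close_input(&fmt_ctx);
    return 0;
}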
void SoftFFmpegVideo::onQueueFilled(OMX_U32 portIndex) {
    int err = 0;

    if (mSignalledError || mOutputPortSettingsChange != NONE) {
        return;
    }

    List<BufferInfo *> &inQueue = getPortQueue(0);
    List<BufferInfo *> &outQueue = getPortQueue(1);

    while (!inQueue.empty() && !outQueue.empty()) {
        BufferInfo *inInfo = *inQueue.begin();
        OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;

        BufferInfo *outInfo = *outQueue.begin();
        OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;

        if (mCtx->width != mWidth || mCtx->height != mHeight) {
            mCtx->width = mWidth;
            mCtx->height = mHeight;
            mStride = mWidth;

            updatePortDefinitions();

            notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
            mOutputPortSettingsChange = AWAITING_DISABLED;
            return;
        }

        if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
            inQueue.erase(inQueue.begin());
            inInfo->mOwnedByUs = false;
            notifyEmptyBufferDone(inHeader);

            outHeader->nFilledLen = 0;
            outHeader->nFlags = OMX_BUFFERFLAG_EOS;

            outQueue.erase(outQueue.begin());
            outInfo->mOwnedByUs = false;
            notifyFillBufferDone(outHeader);
            return;
        }

        if (inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) {
            LOGI("got extradata, ignore: %d, size: %lu",
                    mIgnoreExtradata, inHeader->nFilledLen);
            hexdump(inHeader->pBuffer + inHeader->nOffset, inHeader->nFilledLen);
            if (!mExtradataReady && !mIgnoreExtradata) {
                //if (mMode == MODE_H264)
                // It is possible to receive multiple input buffers with the
                // OMX_BUFFERFLAG_CODECCONFIG flag. For H264, for example, the
                // first input buffer is the SPS and another is the PPS!
                int orig_extradata_size = mCtx->extradata_size;
                mCtx->extradata_size += inHeader->nFilledLen;
                mCtx->extradata = (uint8_t *)realloc(mCtx->extradata,
                        mCtx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
                if (!mCtx->extradata) {
                    LOGE("ffmpeg video decoder failed to alloc extradata memory.");
                    notify(OMX_EventError, OMX_ErrorInsufficientResources, 0, NULL);
                    mSignalledError = true;
                    return;
                }

                memcpy(mCtx->extradata + orig_extradata_size,
                        inHeader->pBuffer + inHeader->nOffset, inHeader->nFilledLen);
                memset(mCtx->extradata + mCtx->extradata_size, 0,
                        FF_INPUT_BUFFER_PADDING_SIZE);

                inInfo->mOwnedByUs = false;
                inQueue.erase(inQueue.begin());
                inInfo = NULL;
                notifyEmptyBufferDone(inHeader);
                inHeader = NULL;

                continue;
            }
            if (mIgnoreExtradata) {
                LOGI("got extradata, size: %lu, but ignore it", inHeader->nFilledLen);
                inInfo->mOwnedByUs = false;
                inQueue.erase(inQueue.begin());
                inInfo = NULL;
                notifyEmptyBufferDone(inHeader);
                inHeader = NULL;

                continue;
            }
        }

        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.data = (uint8_t *)inHeader->pBuffer + inHeader->nOffset;
        pkt.size = inHeader->nFilledLen;
        pkt.pts = inHeader->nTimeStamp;
#if DEBUG_PKT
        LOGV("pkt size: %d, pts: %lld", pkt.size, pkt.pts);
#endif

        if (!mExtradataReady) {
            LOGI("extradata is ready");
            hexdump(mCtx->extradata, mCtx->extradata_size);
            LOGI("open ffmpeg decoder now");

            mExtradataReady = true;
            err = avcodec_open2(mCtx, mCtx->codec, NULL);
            if (err < 0) {
                LOGE("ffmpeg video decoder failed to initialize. (%d)", err);
                notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
                mSignalledError = true;
                return;
            }
        }

        int gotPic = false;
        AVFrame *frame = avcodec_alloc_frame();
        err = avcodec_decode_video2(mCtx, frame, &gotPic, &pkt);
        if (err < 0) {
            LOGE("ffmpeg video decoder failed to decode frame. (%d)", err);
            notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
            mSignalledError = true;
            av_free(frame);
            return;
        }

        if (gotPic) {
            AVPicture pict;
            int64_t pts = AV_NOPTS_VALUE;
            uint8_t *dst = outHeader->pBuffer;

            // Lay out the output buffer as planar YUV420P: Y plane first,
            // then the half-resolution U and V planes.
            memset(&pict, 0, sizeof(AVPicture));
            pict.data[0] = dst;
            pict.data[1] = dst + mStride * mHeight;
            pict.data[2] = pict.data[1] + (mStride / 2 * mHeight / 2);
            pict.linesize[0] = mStride;
            pict.linesize[1] = mStride / 2;
            pict.linesize[2] = mStride / 2;

            int sws_flags = SWS_BICUBIC;
            mImgConvertCtx = sws_getCachedContext(mImgConvertCtx,
                    mWidth, mHeight, mCtx->pix_fmt,
                    mWidth, mHeight, PIX_FMT_YUV420P,
                    sws_flags, NULL, NULL, NULL);
            if (mImgConvertCtx == NULL) {
                LOGE("Cannot initialize the conversion context");
                notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
                mSignalledError = true;
                av_free(frame);
                return;
            }
            sws_scale(mImgConvertCtx, frame->data, frame->linesize,
                    0, mHeight, pict.data, pict.linesize);

            outHeader->nOffset = 0;
            outHeader->nFilledLen = (mStride * mHeight * 3) / 2;
            outHeader->nFlags = 0;
            if (frame->key_frame)
                outHeader->nFlags |= OMX_BUFFERFLAG_SYNCFRAME;

            // process timestamps
            if (decoder_reorder_pts == -1) {
                pts = *(int64_t*)av_opt_ptr(avcodec_get_frame_class(),
                        frame, "best_effort_timestamp");
            } else if (decoder_reorder_pts) {
                pts = frame->pkt_pts;
            } else {
                pts = frame->pkt_dts;
            }

            if (pts == AV_NOPTS_VALUE) {
                pts = 0;
            }
            outHeader->nTimeStamp = pts;
#if DEBUG_FRM
            LOGV("frame pts: %lld", pts);
#endif

            outInfo->mOwnedByUs = false;
            outQueue.erase(outQueue.begin());
            outInfo = NULL;
            notifyFillBufferDone(outHeader);
            outHeader = NULL;
        }

        inInfo->mOwnedByUs = false;
        inQueue.erase(inQueue.begin());
        inInfo = NULL;
        notifyEmptyBufferDone(inHeader);
        inHeader = NULL;

        av_free(frame);
    }
}
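The decode-and-convert core of the loop above can be sketched in isolation from the OMX plumbing. This assumes the same legacy FFmpeg APIs the component targets (avcodec_decode_video2(), avcodec_alloc_frame(), and PIX_FMT_YUV420P were removed or renamed in later releases); decode_to_yuv420p() is a hypothetical helper, not part of SoftFFmpegVideo.

#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>

// Decode one packet and, if a picture comes out, convert it into a planar
// YUV420P buffer laid out the same way as the OMX output buffer above.
// Returns 0 on success, -1 if no picture was produced.
static int decode_to_yuv420p(AVCodecContext *ctx, AVPacket *pkt,
                             uint8_t *dst, int stride, int height,
                             struct SwsContext **sws)
{
    int got_pic = 0;
    AVFrame *frame = avcodec_alloc_frame();

    if (avcodec_decode_video2(ctx, frame, &got_pic, pkt) < 0 || !got_pic) {
        av_free(frame);
        return -1;
    }

    // Y plane, then U and V planes at half resolution, matching pict above.
    uint8_t *data[3] = { dst,
                         dst + stride * height,
                         dst + stride * height + (stride / 2) * (height / 2) };
    int linesize[3] = { stride, stride / 2, stride / 2 };

    // Reuse the cached scaler across calls; it is (re)created on format change.
    *sws = sws_getCachedContext(*sws, ctx->width, ctx->height, ctx->pix_fmt,
                                ctx->width, ctx->height, PIX_FMT_YUV420P,
                                SWS_BICUBIC, NULL, NULL, NULL);
    sws_scale(*sws, frame->data, frame->linesize, 0, height, data, linesize);

    av_free(frame);
    return 0;
}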