void film::update_metadata () { char buf[256]; /* Video metadata */ if (videoStream != -1) { this->height = int (pFormatCtx->streams[videoStream]->codec->height); this->width = int (pFormatCtx->streams[videoStream]->codec->width); this->fps = av_q2d (pFormatCtx->streams[videoStream]->r_frame_rate); avcodec_string (buf, sizeof (buf), pFormatCtx->streams[videoStream]->codec, 0); this->codec.video = buf; } else { this->codec.video = "null"; this->height = 0; this->width = 0; this->fps = 0; } /* Audio metadata */ if (audioStream != -1) { avcodec_string (buf, sizeof (buf), pCodecCtxAudio, 0); this->codec.audio = buf; this->nchannel = pCodecCtxAudio->channels; this->samplerate = pCodecCtxAudio->sample_rate; } else { this->codec.audio = "null"; this->nchannel = 0; this->samplerate = 0; } duration.secs = pFormatCtx->duration / AV_TIME_BASE; duration.us = pFormatCtx->duration % AV_TIME_BASE; duration.mstotal = int (duration.secs * 1000 + duration.us / 1000); duration.mins = duration.secs / 60; duration.secs %= 60; duration.hours = duration.mins / 60; duration.mins %= 60; }
/*
 * Try to open a decoder for every stream exposed by the demuxer context.
 * Streams whose decoder cannot be opened are logged and skipped; the loop
 * always visits all streams.
 */
void RtspStreamWorker::openCodecs(AVFormatContext *context, AVDictionary *options)
{
    for (unsigned int streamIdx = 0; streamIdx < context->nb_streams; ++streamIdx)
    {
        AVStream * const stream = context->streams[streamIdx];

        if (!openCodec(stream, options))
        {
            qDebug() << "RtspStream: cannot find decoder for stream" << streamIdx
                     << "codec" << stream->codec->codec_id;
            continue;
        }

        // Log a human-readable description of the opened stream.
        char info[512];
        avcodec_string(info, sizeof(info), stream->codec, 0);
        qDebug() << "RtspStream: stream #" << streamIdx << ":" << info;
    }
}
/*
 * Open a decoder for every stream in the demuxer context, record the
 * indices of the video and audio streams, and emit the audio format so
 * downstream consumers can configure playback.
 *
 * Fix: the final debug message said "audio steam index" — corrected to
 * "audio stream index".
 */
void RtspStreamWorker::openCodecs(AVFormatContext *context, AVDictionary *options)
{
    for (unsigned int i = 0; i < context->nb_streams; i++)
    {
        qDebug() << "processing stream id " << i;
        AVStream *stream = context->streams[i];

        bool codecOpened = openCodec(stream, options);
        if (!codecOpened)
        {
            qDebug() << "RtspStream: cannot find decoder for stream" << i
                     << "codec" << stream->codec->codec_id;
            continue;
        }

        // Remember the (last successfully opened) stream of each media type.
        if (stream->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            m_videoStreamIndex = i;
        if (stream->codec->codec_type == AVMEDIA_TYPE_AUDIO)
            m_audioStreamIndex = i;

        char info[512];
        avcodec_string(info, sizeof(info), stream->codec, 0);
        qDebug() << "RtspStream: stream #" << i << ":" << info;
    }

    if (m_audioStreamIndex > -1)
    {
        qDebug() << "audio stream time base "
                 << context->streams[m_audioStreamIndex]->codec->time_base.num
                 << "/"
                 << context->streams[m_audioStreamIndex]->codec->time_base.den;
        // Announce sample format / channel count / sample rate to listeners.
        emit audioFormat(context->streams[m_audioStreamIndex]->codec->sample_fmt,
                         context->streams[m_audioStreamIndex]->codec->channels,
                         context->streams[m_audioStreamIndex]->codec->sample_rate);
    }

    qDebug() << "video stream index: " << m_videoStreamIndex;
    qDebug() << "audio stream index: " << m_audioStreamIndex;
}
int do_calculate_dr(const char *filename) { struct stream_context sc; struct dr_meter meter; int err; meter_init(&meter); err = sc_open(&sc, filename); if (err < 0) { return print_av_error("sc_open", err); } int stream_index = err = av_find_best_stream( sc.format_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0); if (err < 0) { print_av_error("av_find_best_stream", err); goto cleanup; } err = sc_start_stream(&sc, stream_index); if (err < 0) { print_av_error("sc_start_stream", err); goto cleanup; } // Print out the stream info AVCodecContext *codec_ctx = sc_get_codec(&sc); char codecinfobuf[256]; avcodec_string(codecinfobuf, sizeof(codecinfobuf), codec_ctx, 0); fprintf(stderr, "%.256s\n", codecinfobuf); err = meter_start(&meter, codec_ctx->channels, codec_ctx->sample_rate, codec_ctx->sample_fmt); if (err) { goto cleanup; } fprintf(stderr, "Collecting fragments information...\n"); size_t fragment = 0; int throbbler_stage = 0; while (!sc_eof(&sc)) { err = sc_get_next_frame(&sc); if (err < 0) { print_av_error("sc_get_next_frame", err); goto cleanup; } err = meter_feed(&meter, sc.buf, sc.buf_size); if (err) { goto cleanup; } if (fragment < meter.fragment) { fragment = meter.fragment; if ((throbbler_stage % 4) == 0) { fprintf(stderr, "\033[1K\033[1G %c %2i:%02i ", throbbler[throbbler_stage / 4], (fragment * 3) / 60, (fragment * 3) % 60); } throbbler_stage += 1; throbbler_stage %= 16; } } meter_finish(&meter); cleanup: meter_free(&meter); sc_close(&sc); if (err < 0) { return err; } return 0; }
/* "user interface" functions */

/*
 * Log a one-line summary of stream `i` of `ic` (stream id, language,
 * codec description, aspect ratios, frame-rate/time-base figures and
 * disposition flags), followed by its metadata and side data.
 * `index` is the printed container index; `is_output` selects muxer vs
 * demuxer flags and is forwarded to avcodec_string().
 */
static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
{
    char buf[256];
    /* Output contexts describe a muxer, input contexts a demuxer. */
    int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
    AVStream *st = ic->streams[i];
    /* Reduce the time base before printing it at DEBUG level below. */
    int g = av_gcd(st->time_base.num, st->time_base.den);
    AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);

    /* Human-readable codec description into buf. */
    avcodec_string(buf, sizeof(buf), st->codec, is_output);

    av_log(NULL, AV_LOG_INFO, " Stream #%d:%d", index, i);

    /* the pid is an important information, so we display it */
    /* XXX: add a generic system */
    if (flags & AVFMT_SHOW_IDS)
        av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
    if (lang)
        av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);

    /* Extra detail only visible at DEBUG verbosity. */
    av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d",
           st->codec_info_nb_frames, st->time_base.num / g, st->time_base.den / g);

    av_log(NULL, AV_LOG_INFO, ": %s", buf);

    /* Print SAR/DAR only when the stream-level SAR differs from the
       codec-level one (otherwise avcodec_string already covered it). */
    if (st->sample_aspect_ratio.num && // default
        av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
        AVRational display_aspect_ratio;
        av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
                  st->codec->width * st->sample_aspect_ratio.num,
                  st->codec->height * st->sample_aspect_ratio.den,
                  1024 * 1024);
        av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
               st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
               display_aspect_ratio.num, display_aspect_ratio.den);
    }

    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* fps = average frame rate, tbr = "real" frame rate,
           tbn = stream time base, tbc = codec time base. */
        if (st->avg_frame_rate.den && st->avg_frame_rate.num)
            print_fps(av_q2d(st->avg_frame_rate), "fps");
#if FF_API_R_FRAME_RATE
        if (st->r_frame_rate.den && st->r_frame_rate.num)
            print_fps(av_q2d(st->r_frame_rate), "tbr");
#endif
        if (st->time_base.den && st->time_base.num)
            print_fps(1 / av_q2d(st->time_base), "tbn");
        if (st->codec->time_base.den && st->codec->time_base.num)
            print_fps(1 / av_q2d(st->codec->time_base), "tbc");
    }

    /* Disposition flags, each appended as a parenthesized tag. */
    if (st->disposition & AV_DISPOSITION_DEFAULT)
        av_log(NULL, AV_LOG_INFO, " (default)");
    if (st->disposition & AV_DISPOSITION_DUB)
        av_log(NULL, AV_LOG_INFO, " (dub)");
    if (st->disposition & AV_DISPOSITION_ORIGINAL)
        av_log(NULL, AV_LOG_INFO, " (original)");
    if (st->disposition & AV_DISPOSITION_COMMENT)
        av_log(NULL, AV_LOG_INFO, " (comment)");
    if (st->disposition & AV_DISPOSITION_LYRICS)
        av_log(NULL, AV_LOG_INFO, " (lyrics)");
    if (st->disposition & AV_DISPOSITION_KARAOKE)
        av_log(NULL, AV_LOG_INFO, " (karaoke)");
    if (st->disposition & AV_DISPOSITION_FORCED)
        av_log(NULL, AV_LOG_INFO, " (forced)");
    if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
        av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
    if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
        av_log(NULL, AV_LOG_INFO, " (visual impaired)");
    if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
        av_log(NULL, AV_LOG_INFO, " (clean effects)");
    av_log(NULL, AV_LOG_INFO, "\n");

    dump_metadata(NULL, st->metadata, "    ");
    dump_sidedata(NULL, st, "    ");
}
// Returns a short codec name for the given stream, derived from FFmpeg's
// avcodec_string() description: the text before the first comma, with any
// leading "Type:" prefix stripped and left-trimmed.
wxString wxFfmpegMediaDecoder::GetCodecName(unsigned int streamIndex)
{
    char desc[256];
    avcodec_string(desc, sizeof(desc), m_formatCtx->streams[streamIndex]->codec, false);

    wxString name = wxString(desc, wxConvLocal).BeforeFirst(wxT(','));
    if (name.Index(wxT(':')) > 0)
        return name.AfterFirst(wxT(':')).Trim(false);
    return name;
}
/* "user interface" functions */

/*
 * Log a one-line summary of stream `i` of `ic` — stream id, language,
 * codec description (built from a temporary AVCodecContext populated
 * from the stream's AVCodecParameters), aspect ratios, frame-rate and
 * time-base figures, and disposition flags — followed by its metadata
 * and side data.  `index` is the printed container index; `is_output`
 * selects muxer vs demuxer flags and is forwarded to avcodec_string().
 */
static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
{
    char buf[256];
    /* Output contexts describe a muxer, input contexts a demuxer. */
    int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
    AVStream *st = ic->streams[i];
    AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
    char *separator = ic->dump_separator;
    AVCodecContext *avctx;
    int ret;

    /* Build a throwaway codec context just to format the description. */
    avctx = avcodec_alloc_context3(NULL);
    if (!avctx)
        return;

    ret = avcodec_parameters_to_context(avctx, st->codecpar);
    if (ret < 0) {
        avcodec_free_context(&avctx);
        return;
    }

    // Fields which are missing from AVCodecParameters need to be taken from the AVCodecContext
    avctx->properties   = st->codec->properties;
    avctx->codec        = st->codec->codec;
    avctx->qmin         = st->codec->qmin;
    avctx->qmax         = st->codec->qmax;
    avctx->coded_width  = st->codec->coded_width;
    avctx->coded_height = st->codec->coded_height;

    if (separator)
        av_opt_set(avctx, "dump_separator", separator, 0);

    avcodec_string(buf, sizeof(buf), avctx, is_output);
    avcodec_free_context(&avctx);

    av_log(NULL, AV_LOG_INFO, " Stream #%d:%d", index, i);

    /* the pid is an important information, so we display it */
    /* XXX: add a generic system */
    if (flags & AVFMT_SHOW_IDS)
        av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
    if (lang)
        av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);

    /* Extra detail only visible at DEBUG verbosity. */
    av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d",
           st->codec_info_nb_frames, st->time_base.num, st->time_base.den);

    av_log(NULL, AV_LOG_INFO, ": %s", buf);

    /* Print SAR/DAR only when the stream-level SAR overrides the one in
       the codec parameters; widen to int64_t to avoid overflow in the
       reduction inputs. */
    if (st->sample_aspect_ratio.num &&
        av_cmp_q(st->sample_aspect_ratio, st->codecpar->sample_aspect_ratio)) {
        AVRational display_aspect_ratio;
        av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
                  st->codecpar->width  * (int64_t)st->sample_aspect_ratio.num,
                  st->codecpar->height * (int64_t)st->sample_aspect_ratio.den,
                  1024 * 1024);
        av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
               st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
               display_aspect_ratio.num, display_aspect_ratio.den);
    }

    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* fps = average frame rate, tbr = "real" frame rate,
           tbn = stream time base, tbc = codec time base.  Each entry is
           comma-separated from the next one that will be printed. */
        int fps = st->avg_frame_rate.den && st->avg_frame_rate.num;
        int tbr = st->r_frame_rate.den && st->r_frame_rate.num;
        int tbn = st->time_base.den && st->time_base.num;
        int tbc = st->codec->time_base.den && st->codec->time_base.num;

        if (fps || tbr || tbn || tbc)
            av_log(NULL, AV_LOG_INFO, "%s", separator);

        if (fps)
            print_fps(av_q2d(st->avg_frame_rate), tbr || tbn || tbc ? "fps, " : "fps");
        if (tbr)
            print_fps(av_q2d(st->r_frame_rate), tbn || tbc ? "tbr, " : "tbr");
        if (tbn)
            print_fps(1 / av_q2d(st->time_base), tbc ? "tbn, " : "tbn");
        if (tbc)
            print_fps(1 / av_q2d(st->codec->time_base), "tbc");
    }

    /* Disposition flags, each appended as a parenthesized tag. */
    if (st->disposition & AV_DISPOSITION_DEFAULT)
        av_log(NULL, AV_LOG_INFO, " (default)");
    if (st->disposition & AV_DISPOSITION_DUB)
        av_log(NULL, AV_LOG_INFO, " (dub)");
    if (st->disposition & AV_DISPOSITION_ORIGINAL)
        av_log(NULL, AV_LOG_INFO, " (original)");
    if (st->disposition & AV_DISPOSITION_COMMENT)
        av_log(NULL, AV_LOG_INFO, " (comment)");
    if (st->disposition & AV_DISPOSITION_LYRICS)
        av_log(NULL, AV_LOG_INFO, " (lyrics)");
    if (st->disposition & AV_DISPOSITION_KARAOKE)
        av_log(NULL, AV_LOG_INFO, " (karaoke)");
    if (st->disposition & AV_DISPOSITION_FORCED)
        av_log(NULL, AV_LOG_INFO, " (forced)");
    if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
        av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
    if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
        av_log(NULL, AV_LOG_INFO, " (visual impaired)");
    if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
        av_log(NULL, AV_LOG_INFO, " (clean effects)");
    if (st->disposition & AV_DISPOSITION_DESCRIPTIONS)
        av_log(NULL, AV_LOG_INFO, " (descriptions)");
    if (st->disposition & AV_DISPOSITION_DEPENDENT)
        av_log(NULL, AV_LOG_INFO, " (dependent)");
    av_log(NULL, AV_LOG_INFO, "\n");

    dump_metadata(NULL, st->metadata, "    ");
    dump_sidedata(NULL, st, "    ");
}
void get_av_info(void *arg) { Media *media = (Media *)arg; int is_output = false; //make msg short avcodec_string(media->info, sizeof(media->info), media->codec_ctx, is_output); LOGI("%s", media->info); }
int TFfmpeg::init(const char *mrl) { av_register_all(); // Open video file if(av_open_input_file(&this->pFormatCtx, mrl, NULL, 0, NULL) != 0) return CANT_OPEN_FILE; // Couldn't open file // Retrieve stream information if(av_find_stream_info(this->pFormatCtx) < 0) return CANT_FIND_VIDEO_CODEC; // Couldn't find stream information dump_format(this->pFormatCtx, 0, mrl, 0); int i; // Find the first video stream videoStream = -1; for(i = 0; i < this->pFormatCtx->nb_streams; i++) { if(this->pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) { videoStream = i; break; } } if(videoStream == -1) return CANT_FIND_VIDEO_CODEC; // Didn't find a video stream // Get a pointer to the codec context for the video stream this->pCodecCtx = pFormatCtx->streams[videoStream]->codec; // Find the decoder for the video stream this->pCodec = avcodec_find_decoder(pCodecCtx->codec_id); char buf[256]; avcodec_string(buf, sizeof(buf), pCodecCtx, 0); double fps = av_q2d (pFormatCtx->streams[videoStream]->r_frame_rate); if(this->pCodec == NULL) { fprintf(stderr, "Unsupported codec!\n"); return CANT_FIND_VIDEO_CODEC; // Codec not found } // Open codec if(avcodec_open(pCodecCtx, pCodec) < 0) return CANT_FIND_VIDEO_CODEC; // Could not open codec // Allocate video frame this->pFrame = avcodec_alloc_frame(); // Allocate an AVFrame structure this->pFrameRGB = avcodec_alloc_frame(); if(this->pFrameRGB == NULL) return CANT_FIND_VIDEO_CODEC; int numBytes; // Determine required buffer size and allocate buffer numBytes = avpicture_get_size(PIX_FMT_RGBA32, pCodecCtx->width, this->pCodecCtx->height); this->buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t)); // Assign appropriate parts of buffer to image planes in pFrameRGB // Note that pFrameRGB is an AVFrame, but AVFrame is a superset // of AVPicture avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGBA32, pCodecCtx->width, pCodecCtx->height); }
/*
 * Recalculate the duration of the track given by --songid by demuxing
 * the whole file and summing audio packet durations, then update the
 * database if the stored length differs.
 *
 * Returns GENERIC_EXIT_OK on success, GENERIC_EXIT_INVALID_CMDLINE when
 * --songid is missing, GENERIC_EXIT_NOT_OK on metadata/file/demux errors.
 */
static int CalcTrackLength(const MythUtilCommandLineParser &cmdline)
{
    if (cmdline.toString("songid").isEmpty())
    {
        LOG(VB_GENERAL, LOG_ERR, "Missing --songid option");
        return GENERIC_EXIT_INVALID_CMDLINE;
    }

    int songID = cmdline.toInt("songid");

    MusicMetadata *mdata = MusicMetadata::createFromID(songID);
    if (!mdata)
    {
        LOG(VB_GENERAL, LOG_ERR, QString("Cannot find metadata for trackid: %1").arg(songID));
        return GENERIC_EXIT_NOT_OK;
    }

    QString musicFile = mdata->getLocalFilename();
    if (musicFile.isEmpty() || !QFile::exists(musicFile))
    {
        LOG(VB_GENERAL, LOG_ERR, QString("Cannot find file for trackid: %1").arg(songID));
        return GENERIC_EXIT_NOT_OK;
    }

    av_register_all();

    AVFormatContext *inputFC = NULL;
    AVInputFormat *fmt = NULL;

    // Open track
    LOG(VB_GENERAL, LOG_DEBUG, QString("CalcTrackLength: Opening '%1'")
        .arg(musicFile));

    QByteArray inFileBA = musicFile.toLocal8Bit();

    int ret = avformat_open_input(&inputFC, inFileBA.constData(), fmt, NULL);
    if (ret)
    {
        LOG(VB_GENERAL, LOG_ERR, "CalcTrackLength: Couldn't open input file" + ENO);
        return GENERIC_EXIT_NOT_OK;
    }

    // Getting stream information
    ret = avformat_find_stream_info(inputFC, NULL);
    if (ret < 0)
    {
        LOG(VB_GENERAL, LOG_ERR,
            QString("CalcTrackLength: Couldn't get stream info, error #%1").arg(ret));
        avformat_close_input(&inputFC);
        inputFC = NULL;
        return GENERIC_EXIT_NOT_OK;;
    }

    int duration = 0;       // final length in whole seconds
    long long time = 0;     // summed packet durations, in stream time_base units

    for (uint i = 0; i < inputFC->nb_streams; i++)
    {
        AVStream *st = inputFC->streams[i];
        char buf[256];
        // Description is formatted into buf (only used for its side effect here).
        avcodec_string(buf, sizeof(buf), st->codec, false);

        switch (inputFC->streams[i]->codec->codec_type)
        {
            case AVMEDIA_TYPE_AUDIO:
            {
                AVPacket pkt;
                av_init_packet(&pkt);

                // Demux the whole file, adding up durations of packets that
                // belong to this stream.
                // NOTE(review): av_read_frame() consumes the entire file, so
                // only the first audio stream encountered gets packet data —
                // presumably music files have a single audio stream; confirm.
                while (av_read_frame(inputFC, &pkt) >= 0)
                {
                    if (pkt.stream_index == (int)i)
                        time = time + pkt.duration;

                    av_free_packet(&pkt);
                }

                // Convert from time_base units to seconds.
                duration = time * av_q2d(inputFC->streams[i]->time_base);
                break;
            }

            default:
                LOG(VB_GENERAL, LOG_ERR,
                    QString("Skipping unsupported codec %1 on stream %2")
                    .arg(inputFC->streams[i]->codec->codec_type).arg(i));
                break;
        }
    }

    // Close input file
    avformat_close_input(&inputFC);
    inputFC = NULL;

    // Stored length is in milliseconds; compare in whole seconds.
    if (mdata->Length() / 1000 != duration)
    {
        LOG(VB_GENERAL, LOG_INFO,
            QString("The length of this track in the database was %1s "
                    "it is now %2s").arg(mdata->Length() / 1000).arg(duration));

        // update the track length in the database
        mdata->setLength(duration * 1000);
        mdata->dumpToDatabase();

        // tell any clients that the metadata for this track has changed
        gCoreContext->SendMessage(QString("MUSIC_METADATA_CHANGED %1").arg(songID));
    }
    else
    {
        LOG(VB_GENERAL, LOG_INFO,
            QString("The length of this track is unchanged %1s")
            .arg(mdata->Length() / 1000));
    }

    return GENERIC_EXIT_OK;
}
/*
 * Populate `md` with metadata probed from the libavformat context `fctx`:
 * artist/album/format/duration from container metadata, then either a
 * quick audio-only result (title + track number) or a full per-stream
 * scan that records every video/audio/subtitle stream and classifies the
 * content type.  `filename` is used only as an atoi() fallback for the
 * track number when the container has no "track" tag.
 */
static void fa_lavf_load_meta(metadata_t *md, AVFormatContext *fctx,
                              const char *filename)
{
    int i;
    char tmp1[1024];
    int has_video = 0;
    int has_audio = 0;

    /* GNU ?: — fall back to the "author" tag when "artist" is absent. */
    md->md_artist = ffmpeg_metadata_rstr(fctx->metadata, "artist") ?:
        ffmpeg_metadata_rstr(fctx->metadata, "author");

    md->md_album = ffmpeg_metadata_rstr(fctx->metadata, "album");

    md->md_format = rstr_alloc(fctx->iformat->long_name);

    /* fctx->duration is in AV_TIME_BASE (microsecond) units. */
    if(fctx->duration != AV_NOPTS_VALUE)
        md->md_duration = (float)fctx->duration / 1000000;

    /* First pass: just detect which media types are present.  Attached
       pictures (cover art) do not count as video. */
    for(i = 0; i < fctx->nb_streams; i++) {
        AVStream *stream = fctx->streams[i];
        AVCodecContext *avctx = stream->codec;

        if(avctx->codec_type == AVMEDIA_TYPE_AUDIO)
            has_audio = 1;

        if(avctx->codec_type == AVMEDIA_TYPE_VIDEO &&
           !(stream->disposition & AV_DISPOSITION_ATTACHED_PIC))
            has_video = 1;
    }

    /* Pure audio: take the fast path and skip the per-stream scan. */
    if(has_audio && !has_video) {
        md->md_contenttype = CONTENT_AUDIO;
        md->md_title = ffmpeg_metadata_rstr(fctx->metadata, "title");
        md->md_track = ffmpeg_metadata_int(fctx->metadata, "track",
                                           filename ? atoi(filename) : 0);
        return;
    }

    /* Second pass re-detects these based on decoder availability. */
    has_audio = 0;
    has_video = 0;

    if(1) {
        /* Per-type track counters (1-based track numbers). */
        int atrack = 0;
        int strack = 0;
        int vtrack = 0;

        /* Check each stream */
        for(i = 0; i < fctx->nb_streams; i++) {
            AVStream *stream = fctx->streams[i];
            AVCodecContext *avctx = stream->codec;
            AVCodec *codec = avcodec_find_decoder(avctx->codec_id);
            AVDictionaryEntry *lang, *title;
            int tn;
            char str[256];

            avcodec_string(str, sizeof(str), avctx, 0);
            TRACE(TRACE_DEBUG, "Probe", " Stream #%d: %s", i, str);

            switch(avctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO:
                /* Counts as video/audio only if we can actually decode it. */
                has_video = !!codec;
                tn = ++vtrack;
                break;
            case AVMEDIA_TYPE_AUDIO:
                has_audio = !!codec;
                tn = ++atrack;
                break;
            case AVMEDIA_TYPE_SUBTITLE:
                tn = ++strack;
                break;

            default:
                continue;
            }

            /* Stream description: plain codec name when no decoder exists,
               otherwise a richer description from the decoder. */
            if(codec == NULL) {
                snprintf(tmp1, sizeof(tmp1), "%s", codecname(avctx->codec_id));
            } else {
                metadata_from_libav(tmp1, sizeof(tmp1), codec, avctx);
            }

            lang  = av_dict_get(stream->metadata, "language", NULL,
                                AV_DICT_IGNORE_SUFFIX);
            title = av_dict_get(stream->metadata, "title", NULL,
                                AV_DICT_IGNORE_SUFFIX);

            metadata_add_stream(md, codecname(avctx->codec_id),
                                avctx->codec_type, i,
                                title ? title->value : NULL,
                                tmp1,
                                lang ? lang->value : NULL,
                                stream->disposition,
                                tn);
        }

        /* Classify: video wins over audio; otherwise generic file. */
        md->md_contenttype = CONTENT_FILE;
        if(has_video) {
            md->md_contenttype = CONTENT_VIDEO;
        } else if(has_audio) {
            md->md_contenttype = CONTENT_AUDIO;
        }
    }
}