/*****************************************************************************
 * ffmpeg_OpenCodec: configure the libavcodec context from the input ES
 * format and open the codec.
 *
 * Returns VLC_SUCCESS on success, VLC_EGENERIC if avcodec_open2() fails,
 * and 1 (a non-fatal "retry later") when the codec needs extradata that
 * has not arrived yet -- in that case b_delayed_open stays set and the
 * caller is expected to call this again once extradata is available.
 *****************************************************************************/
int ffmpeg_OpenCodec( decoder_t *p_dec )
{
    decoder_sys_t *p_sys = p_dec->p_sys;

    /* Some codecs cannot be opened without their out-of-band headers
     * (extradata). Defer the open until the demuxer/packetizer provides
     * them. AAC only needs this when the input is not packetized. */
    if( p_sys->p_context->extradata_size <= 0 )
    {
        if( p_sys->i_codec_id == AV_CODEC_ID_VC1 ||
            p_sys->i_codec_id == AV_CODEC_ID_VORBIS ||
            p_sys->i_codec_id == AV_CODEC_ID_THEORA ||
            ( p_sys->i_codec_id == AV_CODEC_ID_AAC &&
              !p_dec->fmt_in.b_packetized ) )
        {
            msg_Warn( p_dec, "waiting for extra data for codec %s",
                      p_sys->psz_namecodec );
            return 1;
        }
    }

    /* Seed the codec context with whatever the ES format already knows;
     * avcodec may refine these values once it sees actual data. */
    if( p_dec->fmt_in.i_cat == VIDEO_ES )
    {
        p_sys->p_context->width  = p_dec->fmt_in.video.i_width;
        p_sys->p_context->height = p_dec->fmt_in.video.i_height;
        p_sys->p_context->bits_per_coded_sample =
            p_dec->fmt_in.video.i_bits_per_pixel;
    }
    else if( p_dec->fmt_in.i_cat == AUDIO_ES )
    {
        p_sys->p_context->sample_rate = p_dec->fmt_in.audio.i_rate;
        p_sys->p_context->channels = p_dec->fmt_in.audio.i_channels;
        p_sys->p_context->block_align = p_dec->fmt_in.audio.i_blockalign;
        p_sys->p_context->bit_rate = p_dec->fmt_in.i_bitrate;
        p_sys->p_context->bits_per_coded_sample =
            p_dec->fmt_in.audio.i_bitspersample;

        /* G.726 streams encode their sample depth implicitly as
         * bitrate / sample rate (2/3/4/5 bits per sample). */
        if( p_sys->i_codec_id == AV_CODEC_ID_ADPCM_G726 &&
            p_sys->p_context->bit_rate > 0 &&
            p_sys->p_context->sample_rate > 0 )
            p_sys->p_context->bits_per_coded_sample =
                p_sys->p_context->bit_rate / p_sys->p_context->sample_rate;
    }

    int ret;
    /* User-supplied "avcodec-options" string, parsed into an AVDictionary
     * that avcodec_open2() consumes; entries it does not recognize are
     * left in the dictionary. */
    char *psz_opts = var_InheritString( p_dec, "avcodec-options" );
    AVDictionary *options = NULL;
    if (psz_opts && *psz_opts)
        options = vlc_av_get_options(psz_opts);
    free(psz_opts);

    /* NOTE(review): the global lock presumably serializes
     * avcodec_open2/close, which are not thread-safe in older libavcodec
     * -- confirm against the vlc_avcodec_lock implementation. */
    vlc_avcodec_lock();
    ret = avcodec_open2( p_sys->p_context, p_sys->p_codec,
                         options ? &options : NULL );
    vlc_avcodec_unlock();

    /* Anything still left in the dictionary was not consumed by the
     * codec: report it as an unknown option. */
    AVDictionaryEntry *t = NULL;
    while ((t = av_dict_get(options, "", t, AV_DICT_IGNORE_SUFFIX))) {
        msg_Err( p_dec, "Unknown option \"%s\"", t->key );
    }
    av_dict_free(&options);

    if( ret < 0 )
        return VLC_EGENERIC;
    msg_Dbg( p_dec, "avcodec codec (%s) started", p_sys->psz_namecodec );

#ifdef HAVE_AVCODEC_MT
    /* Report which threading mode libavcodec actually selected (it may
     * differ from what was requested). */
    if( p_dec->fmt_in.i_cat == VIDEO_ES )
    {
        switch( p_sys->p_context->active_thread_type )
        {
            case FF_THREAD_FRAME:
                msg_Dbg( p_dec, "using frame thread mode with %d threads",
                         p_sys->p_context->thread_count );
                break;
            case FF_THREAD_SLICE:
                msg_Dbg( p_dec, "using slice thread mode with %d threads",
                         p_sys->p_context->thread_count );
                break;
            case 0:
                if( p_sys->p_context->thread_count > 1 )
                    msg_Warn( p_dec, "failed to enable threaded decoding" );
                break;
            default:
                msg_Warn( p_dec, "using unknown thread mode with %d threads",
                          p_sys->p_context->thread_count );
                break;
        }
    }
#endif

    p_sys->b_delayed_open = false;

    return VLC_SUCCESS;
}
/* * Fills metadata read with ffmpeg/libav from the given path into the given mfi * * Following attributes from the given mfi are read to control how to read metadata: * - data_kind: if data_kind is http, icy metadata is used, if the path points to a playlist the first stream-uri in that playlist is used * - media_kind: if media_kind is podcast or audiobook, video streams in the file are ignored * - compilation: like podcast/audiobook video streams are ignored for compilations * - file_size: if bitrate could not be read through ffmpeg/libav, file_size is used to estimate the bitrate * - fname: (filename) used as fallback for artist */ int scan_metadata_ffmpeg(const char *file, struct media_file_info *mfi) { AVFormatContext *ctx; AVDictionary *options; const struct metadata_map *extra_md_map; struct http_icy_metadata *icy_metadata; enum AVMediaType codec_type; enum AVCodecID codec_id; enum AVCodecID video_codec_id; enum AVCodecID audio_codec_id; enum AVSampleFormat sample_fmt; AVStream *video_stream; AVStream *audio_stream; char *path; int mdcount; int sample_rate; int i; int ret; ctx = NULL; options = NULL; path = strdup(file); if (mfi->data_kind == DATA_KIND_HTTP) { #ifndef HAVE_FFMPEG // Without this, libav is slow to probe some internet streams ctx = avformat_alloc_context(); ctx->probesize = 64000; #endif free(path); ret = http_stream_setup(&path, file); if (ret < 0) return -1; av_dict_set(&options, "icy", "1", 0); mfi->artwork = ARTWORK_HTTP; } ret = avformat_open_input(&ctx, path, NULL, &options); if (options) av_dict_free(&options); if (ret != 0) { DPRINTF(E_WARN, L_SCAN, "Cannot open media file '%s': %s\n", path, err2str(ret)); free(path); return -1; } ret = avformat_find_stream_info(ctx, NULL); if (ret < 0) { DPRINTF(E_WARN, L_SCAN, "Cannot get stream info of '%s': %s\n", path, err2str(ret)); avformat_close_input(&ctx); free(path); return -1; } free(path); #if 0 /* Dump input format as determined by ffmpeg */ av_dump_format(ctx, 0, file, 0); #endif 
DPRINTF(E_DBG, L_SCAN, "File has %d streams\n", ctx->nb_streams); /* Extract codec IDs, check for video */ video_codec_id = AV_CODEC_ID_NONE; video_stream = NULL; audio_codec_id = AV_CODEC_ID_NONE; audio_stream = NULL; for (i = 0; i < ctx->nb_streams; i++) { codec_type = ctx->streams[i]->codecpar->codec_type; codec_id = ctx->streams[i]->codecpar->codec_id; sample_rate = ctx->streams[i]->codecpar->sample_rate; sample_fmt = ctx->streams[i]->codecpar->format; switch (codec_type) { case AVMEDIA_TYPE_VIDEO: if (ctx->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC) { DPRINTF(E_DBG, L_SCAN, "Found embedded artwork (stream %d)\n", i); mfi->artwork = ARTWORK_EMBEDDED; break; } // We treat these as audio no matter what if (mfi->compilation || (mfi->media_kind & (MEDIA_KIND_PODCAST | MEDIA_KIND_AUDIOBOOK))) break; if (!video_stream) { DPRINTF(E_DBG, L_SCAN, "File has video (stream %d)\n", i); video_stream = ctx->streams[i]; video_codec_id = codec_id; mfi->has_video = 1; } break; case AVMEDIA_TYPE_AUDIO: if (!audio_stream) { audio_stream = ctx->streams[i]; audio_codec_id = codec_id; mfi->samplerate = sample_rate; mfi->bits_per_sample = 8 * av_get_bytes_per_sample(sample_fmt); if (mfi->bits_per_sample == 0) mfi->bits_per_sample = av_get_bits_per_sample(codec_id); } break; default: break; } } if (audio_codec_id == AV_CODEC_ID_NONE) { DPRINTF(E_DBG, L_SCAN, "File has no audio streams, discarding\n"); avformat_close_input(&ctx); return -1; } /* Common media information */ if (ctx->duration > 0) mfi->song_length = ctx->duration / (AV_TIME_BASE / 1000); /* ms */ if (ctx->bit_rate > 0) mfi->bitrate = ctx->bit_rate / 1000; else if (ctx->duration > AV_TIME_BASE) /* guesstimate */ mfi->bitrate = ((mfi->file_size * 8) / (ctx->duration / AV_TIME_BASE)) / 1000; DPRINTF(E_DBG, L_SCAN, "Duration %d ms, bitrate %d kbps\n", mfi->song_length, mfi->bitrate); /* Try to extract ICY metadata if http stream */ if (mfi->data_kind == DATA_KIND_HTTP) { icy_metadata = http_icy_metadata_get(ctx, 
0); if (icy_metadata && icy_metadata->name) { DPRINTF(E_DBG, L_SCAN, "Found ICY metadata, name is '%s'\n", icy_metadata->name); if (mfi->title) free(mfi->title); if (mfi->artist) free(mfi->artist); if (mfi->album_artist) free(mfi->album_artist); mfi->title = strdup(icy_metadata->name); mfi->artist = strdup(icy_metadata->name); mfi->album_artist = strdup(icy_metadata->name); } if (icy_metadata && icy_metadata->description) { DPRINTF(E_DBG, L_SCAN, "Found ICY metadata, description is '%s'\n", icy_metadata->description); if (mfi->album) free(mfi->album); mfi->album = strdup(icy_metadata->description); } if (icy_metadata && icy_metadata->genre) { DPRINTF(E_DBG, L_SCAN, "Found ICY metadata, genre is '%s'\n", icy_metadata->genre); if (mfi->genre) free(mfi->genre); mfi->genre = strdup(icy_metadata->genre); } if (icy_metadata) http_icy_metadata_free(icy_metadata, 0); } /* Check codec */ extra_md_map = NULL; codec_id = (mfi->has_video) ? video_codec_id : audio_codec_id; switch (codec_id) { case AV_CODEC_ID_AAC: DPRINTF(E_DBG, L_SCAN, "AAC\n"); mfi->type = strdup("m4a"); mfi->codectype = strdup("mp4a"); mfi->description = strdup("AAC audio file"); break; case AV_CODEC_ID_ALAC: DPRINTF(E_DBG, L_SCAN, "ALAC\n"); mfi->type = strdup("m4a"); mfi->codectype = strdup("alac"); mfi->description = strdup("Apple Lossless audio file"); break; case AV_CODEC_ID_FLAC: DPRINTF(E_DBG, L_SCAN, "FLAC\n"); mfi->type = strdup("flac"); mfi->codectype = strdup("flac"); mfi->description = strdup("FLAC audio file"); extra_md_map = md_map_vorbis; break; case AV_CODEC_ID_APE: DPRINTF(E_DBG, L_SCAN, "APE\n"); mfi->type = strdup("ape"); mfi->codectype = strdup("ape"); mfi->description = strdup("Monkey's audio"); break; case AV_CODEC_ID_MUSEPACK7: case AV_CODEC_ID_MUSEPACK8: DPRINTF(E_DBG, L_SCAN, "Musepack\n"); mfi->type = strdup("mpc"); mfi->codectype = strdup("mpc"); mfi->description = strdup("Musepack audio file"); break; case AV_CODEC_ID_MPEG4: /* Video */ case AV_CODEC_ID_H264: DPRINTF(E_DBG, 
L_SCAN, "MPEG4 video\n"); mfi->type = strdup("m4v"); mfi->codectype = strdup("mp4v"); mfi->description = strdup("MPEG-4 video file"); extra_md_map = md_map_tv; break; case AV_CODEC_ID_MP3: DPRINTF(E_DBG, L_SCAN, "MP3\n"); mfi->type = strdup("mp3"); mfi->codectype = strdup("mpeg"); mfi->description = strdup("MPEG audio file"); extra_md_map = md_map_id3; break; case AV_CODEC_ID_VORBIS: DPRINTF(E_DBG, L_SCAN, "VORBIS\n"); mfi->type = strdup("ogg"); mfi->codectype = strdup("ogg"); mfi->description = strdup("Ogg Vorbis audio file"); extra_md_map = md_map_vorbis; break; case AV_CODEC_ID_WMAV1: case AV_CODEC_ID_WMAV2: case AV_CODEC_ID_WMAVOICE: DPRINTF(E_DBG, L_SCAN, "WMA Voice\n"); mfi->type = strdup("wma"); mfi->codectype = strdup("wmav"); mfi->description = strdup("WMA audio file"); break; case AV_CODEC_ID_WMAPRO: DPRINTF(E_DBG, L_SCAN, "WMA Pro\n"); mfi->type = strdup("wmap"); mfi->codectype = strdup("wma"); mfi->description = strdup("WMA audio file"); break; case AV_CODEC_ID_WMALOSSLESS: DPRINTF(E_DBG, L_SCAN, "WMA Lossless\n"); mfi->type = strdup("wma"); mfi->codectype = strdup("wmal"); mfi->description = strdup("WMA audio file"); break; case AV_CODEC_ID_PCM_S16LE ... AV_CODEC_ID_PCM_F64LE: if (strcmp(ctx->iformat->name, "aiff") == 0) { DPRINTF(E_DBG, L_SCAN, "AIFF\n"); mfi->type = strdup("aif"); mfi->codectype = strdup("aif"); mfi->description = strdup("AIFF audio file"); break; } else if (strcmp(ctx->iformat->name, "wav") == 0) { DPRINTF(E_DBG, L_SCAN, "WAV\n"); mfi->type = strdup("wav"); mfi->codectype = strdup("wav"); mfi->description = strdup("WAV audio file"); break; } /* WARNING: will fallthrough to default case, don't move */ /* FALLTHROUGH */ default: DPRINTF(E_DBG, L_SCAN, "Unknown codec 0x%x (video: %s), format %s (%s)\n", codec_id, (mfi->has_video) ? 
"yes" : "no", ctx->iformat->name, ctx->iformat->long_name); mfi->type = strdup("unkn"); mfi->codectype = strdup("unkn"); if (mfi->has_video) { mfi->description = strdup("Unknown video file format"); extra_md_map = md_map_tv; } else mfi->description = strdup("Unknown audio file format"); break; } mdcount = 0; if ((!ctx->metadata) && (!audio_stream->metadata) && (video_stream && !video_stream->metadata)) { DPRINTF(E_WARN, L_SCAN, "ffmpeg reports no metadata\n"); goto skip_extract; } if (extra_md_map) { ret = extract_metadata(mfi, ctx, audio_stream, video_stream, extra_md_map); mdcount += ret; DPRINTF(E_DBG, L_SCAN, "Picked up %d tags with extra md_map\n", ret); } ret = extract_metadata(mfi, ctx, audio_stream, video_stream, md_map_generic); mdcount += ret; DPRINTF(E_DBG, L_SCAN, "Picked up %d tags with generic md_map, %d tags total\n", ret, mdcount); /* fix up TV metadata */ if (mfi->media_kind == 10) { /* I have no idea why this is, but iTunes reports a media kind of 64 for stik==10 (?!) */ mfi->media_kind = MEDIA_KIND_TVSHOW; } /* Unspecified video files are "Movies", media_kind 2 */ else if (mfi->has_video == 1) { mfi->media_kind = MEDIA_KIND_MOVIE; } skip_extract: avformat_close_input(&ctx); if (mdcount == 0) DPRINTF(E_WARN, L_SCAN, "ffmpeg/libav could not extract any metadata\n"); /* Just in case there's no title set ... */ if (mfi->title == NULL) mfi->title = strdup(mfi->fname); /* All done */ return 0; }
/**
 * Copy only the "metadata" properties of a frame (timestamps, flags,
 * side data, dictionaries, QP table) from src to dst.
 * The data buffers themselves are not touched.
 *
 * @return 0 on success, AVERROR(ENOMEM) if a side-data entry could not
 *         be allocated (dst's side data is fully released in that case).
 */
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
{
    int i;

    dst->key_frame              = src->key_frame;
    dst->pict_type              = src->pict_type;
    dst->sample_aspect_ratio    = src->sample_aspect_ratio;
    dst->pts                    = src->pts;
    dst->repeat_pict            = src->repeat_pict;
    dst->interlaced_frame       = src->interlaced_frame;
    dst->top_field_first        = src->top_field_first;
    dst->palette_has_changed    = src->palette_has_changed;
    dst->sample_rate            = src->sample_rate;
    dst->opaque                 = src->opaque;
#if FF_API_AVFRAME_LAVC
    dst->type                   = src->type;
#endif
    dst->pkt_pts                = src->pkt_pts;
    dst->pkt_dts                = src->pkt_dts;
    dst->pkt_pos                = src->pkt_pos;
    dst->pkt_size               = src->pkt_size;
    dst->pkt_duration           = src->pkt_duration;
    dst->reordered_opaque       = src->reordered_opaque;
    dst->quality                = src->quality;
    dst->best_effort_timestamp  = src->best_effort_timestamp;
    dst->coded_picture_number   = src->coded_picture_number;
    dst->display_picture_number = src->display_picture_number;
    dst->flags                  = src->flags;
    dst->decode_error_flags     = src->decode_error_flags;
    dst->colorspace             = src->colorspace;
    dst->color_range            = src->color_range;

    av_dict_copy(&dst->metadata, src->metadata, 0);

    memcpy(dst->error, src->error, sizeof(dst->error));

    for (i = 0; i < src->nb_side_data; i++) {
        const AVFrameSideData *sd_src = src->side_data[i];
        AVFrameSideData *sd_dst = av_frame_new_side_data(dst, sd_src->type,
                                                         sd_src->size);
        if (!sd_dst) {
            /* Fix: free the metadata dictionary BEFORE freeing the entry
             * itself -- the original freed side_data[j] and then
             * dereferenced it (use-after-free). Use a separate index so
             * the cleanup is independent of the outer loop counter. */
            int j;
            for (j = 0; j < dst->nb_side_data; j++) {
                av_freep(&dst->side_data[j]->data);
                av_dict_free(&dst->side_data[j]->metadata);
                av_freep(&dst->side_data[j]);
            }
            av_freep(&dst->side_data);
            dst->nb_side_data = 0; /* don't leave a dangling count behind */
            return AVERROR(ENOMEM);
        }
        memcpy(sd_dst->data, sd_src->data, sd_src->size);
        av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
    }

    /* QP table: share src's buffer by reference when available */
    dst->qscale_table = NULL;
    dst->qstride      = 0;
    dst->qscale_type  = 0;
    if (src->qp_table_buf) {
        dst->qp_table_buf = av_buffer_ref(src->qp_table_buf);
        if (dst->qp_table_buf) {
            dst->qscale_table = dst->qp_table_buf->data;
            dst->qstride      = src->qstride;
            dst->qscale_type  = src->qscale_type;
        }
    }

    return 0;
}
bool FFmpegEncoderOpen(struct FFmpegEncoder* encoder, const char* outfile) { AVCodec* acodec = avcodec_find_encoder_by_name(encoder->audioCodec); AVCodec* vcodec = avcodec_find_encoder_by_name(encoder->videoCodec); if ((encoder->audioCodec && !acodec) || !vcodec || !FFmpegEncoderVerifyContainer(encoder)) { return false; } encoder->currentAudioSample = 0; encoder->currentAudioFrame = 0; encoder->currentVideoFrame = 0; encoder->nextAudioPts = 0; AVOutputFormat* oformat = av_guess_format(encoder->containerFormat, 0, 0); #ifndef USE_LIBAV avformat_alloc_output_context2(&encoder->context, oformat, 0, outfile); #else encoder->context = avformat_alloc_context(); strncpy(encoder->context->filename, outfile, sizeof(encoder->context->filename) - 1); encoder->context->filename[sizeof(encoder->context->filename) - 1] = '\0'; encoder->context->oformat = oformat; #endif if (acodec) { #ifdef FFMPEG_USE_CODECPAR encoder->audioStream = avformat_new_stream(encoder->context, NULL); encoder->audio = avcodec_alloc_context3(acodec); #else encoder->audioStream = avformat_new_stream(encoder->context, acodec); encoder->audio = encoder->audioStream->codec; #endif encoder->audio->bit_rate = encoder->audioBitrate; encoder->audio->channels = 2; encoder->audio->channel_layout = AV_CH_LAYOUT_STEREO; encoder->audio->sample_rate = encoder->sampleRate; encoder->audio->sample_fmt = encoder->sampleFormat; AVDictionary* opts = 0; av_dict_set(&opts, "strict", "-2", 0); if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) { #ifdef AV_CODEC_FLAG_GLOBAL_HEADER encoder->audio->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; #else encoder->audio->flags |= CODEC_FLAG_GLOBAL_HEADER; #endif } avcodec_open2(encoder->audio, acodec, &opts); av_dict_free(&opts); #if LIBAVCODEC_VERSION_MAJOR >= 55 encoder->audioFrame = av_frame_alloc(); #else encoder->audioFrame = avcodec_alloc_frame(); #endif if (!encoder->audio->frame_size) { encoder->audio->frame_size = 1; } encoder->audioFrame->nb_samples = 
encoder->audio->frame_size; encoder->audioFrame->format = encoder->audio->sample_fmt; encoder->audioFrame->pts = 0; #ifdef USE_LIBAVRESAMPLE encoder->resampleContext = avresample_alloc_context(); av_opt_set_int(encoder->resampleContext, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0); av_opt_set_int(encoder->resampleContext, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0); av_opt_set_int(encoder->resampleContext, "in_sample_rate", PREFERRED_SAMPLE_RATE, 0); av_opt_set_int(encoder->resampleContext, "out_sample_rate", encoder->sampleRate, 0); av_opt_set_int(encoder->resampleContext, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0); av_opt_set_int(encoder->resampleContext, "out_sample_fmt", encoder->sampleFormat, 0); avresample_open(encoder->resampleContext); #else encoder->resampleContext = swr_alloc_set_opts(NULL, AV_CH_LAYOUT_STEREO, encoder->sampleFormat, encoder->sampleRate, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, PREFERRED_SAMPLE_RATE, 0, NULL); swr_init(encoder->resampleContext); #endif encoder->audioBufferSize = (encoder->audioFrame->nb_samples * PREFERRED_SAMPLE_RATE / encoder->sampleRate) * 4; encoder->audioBuffer = av_malloc(encoder->audioBufferSize); encoder->postaudioBufferSize = av_samples_get_buffer_size(0, encoder->audio->channels, encoder->audio->frame_size, encoder->audio->sample_fmt, 0); encoder->postaudioBuffer = av_malloc(encoder->postaudioBufferSize); avcodec_fill_audio_frame(encoder->audioFrame, encoder->audio->channels, encoder->audio->sample_fmt, (const uint8_t*) encoder->postaudioBuffer, encoder->postaudioBufferSize, 0); if (encoder->audio->codec->id == AV_CODEC_ID_AAC && (strcasecmp(encoder->containerFormat, "mp4") || strcasecmp(encoder->containerFormat, "m4v") || strcasecmp(encoder->containerFormat, "mov"))) { // MP4 container doesn't support the raw ADTS AAC format that the encoder spits out #ifdef FFMPEG_USE_NEW_BSF av_bsf_alloc(av_bsf_get_by_name("aac_adtstoasc"), &encoder->absf); avcodec_parameters_from_context(encoder->absf->par_in, encoder->audio); 
av_bsf_init(encoder->absf); #else encoder->absf = av_bitstream_filter_init("aac_adtstoasc"); #endif } #ifdef FFMPEG_USE_CODECPAR avcodec_parameters_from_context(encoder->audioStream->codecpar, encoder->audio); #endif } #ifdef FFMPEG_USE_CODECPAR encoder->videoStream = avformat_new_stream(encoder->context, NULL); encoder->video = avcodec_alloc_context3(vcodec); #else encoder->videoStream = avformat_new_stream(encoder->context, vcodec); encoder->video = encoder->videoStream->codec; #endif encoder->video->bit_rate = encoder->videoBitrate; encoder->video->width = encoder->width; encoder->video->height = encoder->height; encoder->video->time_base = (AVRational) { VIDEO_TOTAL_LENGTH, GBA_ARM7TDMI_FREQUENCY }; encoder->video->pix_fmt = encoder->pixFormat; encoder->video->gop_size = 60; encoder->video->max_b_frames = 3; if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) { #ifdef AV_CODEC_FLAG_GLOBAL_HEADER encoder->video->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; #else encoder->video->flags |= CODEC_FLAG_GLOBAL_HEADER; #endif } if (encoder->video->codec->id == AV_CODEC_ID_H264 && (strcasecmp(encoder->containerFormat, "mp4") || strcasecmp(encoder->containerFormat, "m4v") || strcasecmp(encoder->containerFormat, "mov"))) { // QuickTime and a few other things require YUV420 encoder->video->pix_fmt = AV_PIX_FMT_YUV420P; } if (strcmp(vcodec->name, "libx264") == 0) { // Try to adaptively figure out when you can use a slower encoder if (encoder->width * encoder->height > 1000000) { av_opt_set(encoder->video->priv_data, "preset", "superfast", 0); } else if (encoder->width * encoder->height > 500000) { av_opt_set(encoder->video->priv_data, "preset", "veryfast", 0); } else { av_opt_set(encoder->video->priv_data, "preset", "faster", 0); } if (encoder->videoBitrate == 0) { av_opt_set(encoder->video->priv_data, "crf", "0", 0); encoder->video->pix_fmt = AV_PIX_FMT_YUV444P; } } avcodec_open2(encoder->video, vcodec, 0); #if LIBAVCODEC_VERSION_MAJOR >= 55 encoder->videoFrame = 
av_frame_alloc(); #else encoder->videoFrame = avcodec_alloc_frame(); #endif encoder->videoFrame->format = encoder->video->pix_fmt; encoder->videoFrame->width = encoder->video->width; encoder->videoFrame->height = encoder->video->height; encoder->videoFrame->pts = 0; _ffmpegSetVideoDimensions(&encoder->d, encoder->iwidth, encoder->iheight); av_image_alloc(encoder->videoFrame->data, encoder->videoFrame->linesize, encoder->video->width, encoder->video->height, encoder->video->pix_fmt, 32); #ifdef FFMPEG_USE_CODECPAR avcodec_parameters_from_context(encoder->videoStream->codecpar, encoder->video); #endif if (avio_open(&encoder->context->pb, outfile, AVIO_FLAG_WRITE) < 0) { return false; } return avformat_write_header(encoder->context, 0) >= 0; }
/**
 * Open one tee slave output: parse its per-slave options, create an
 * output context mirroring the selected input streams, write its header
 * and attach any requested per-stream bitstream filters.
 *
 * @param avf       the tee muxer context (for logging and stream source)
 * @param slave     slave specification string ("[opts]filename")
 * @param tee_slave filled with the slave's context, stream map and bsfs
 * @return 0 on success, a negative AVERROR code on failure
 */
static int open_slave(AVFormatContext *avf, char *slave, TeeSlave *tee_slave)
{
    int i, ret;
    AVDictionary *options = NULL;
    AVDictionaryEntry *entry;
    char *filename;
    char *format = NULL, *select = NULL;
    AVFormatContext *avf2 = NULL;
    AVStream *st, *st2;
    int stream_count;

    if ((ret = parse_slave_options(avf, slave, &options, &filename)) < 0)
        return ret;

/* Move an option's value out of the dictionary so it survives deletion. */
#define STEAL_OPTION(option, field) do {                                \
        if ((entry = av_dict_get(options, option, NULL, 0))) {          \
            field = entry->value;                                       \
            entry->value = NULL; /* prevent it from being freed */      \
            av_dict_set(&options, option, NULL, 0);                     \
        }                                                               \
    } while (0)

    STEAL_OPTION("f", format);
    STEAL_OPTION("select", select);

    ret = avformat_alloc_output_context2(&avf2, NULL, format, filename);
    if (ret < 0)
        goto end;
    av_dict_copy(&avf2->metadata, avf->metadata, 0);

    tee_slave->stream_map = av_calloc(avf->nb_streams, sizeof(*tee_slave->stream_map));
    if (!tee_slave->stream_map) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    stream_count = 0;
    for (i = 0; i < avf->nb_streams; i++) {
        st = avf->streams[i];
        if (select) {
            ret = avformat_match_stream_specifier(avf, avf->streams[i], select);
            if (ret < 0) {
                av_log(avf, AV_LOG_ERROR,
                       "Invalid stream specifier '%s' for output '%s'\n",
                       select, slave);
                goto end;
            }

            if (ret == 0) { /* no match */
                tee_slave->stream_map[i] = -1;
                continue;
            }
        }
        tee_slave->stream_map[i] = stream_count++;

        if (!(st2 = avformat_new_stream(avf2, NULL))) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        /* Mirror the source stream's parameters into the slave stream */
        st2->id = st->id;
        st2->r_frame_rate        = st->r_frame_rate;
        st2->time_base           = st->time_base;
        st2->start_time          = st->start_time;
        st2->duration            = st->duration;
        st2->nb_frames           = st->nb_frames;
        st2->disposition         = st->disposition;
        st2->sample_aspect_ratio = st->sample_aspect_ratio;
        st2->avg_frame_rate      = st->avg_frame_rate;
        av_dict_copy(&st2->metadata, st->metadata, 0);
        if ((ret = avcodec_copy_context(st2->codec, st->codec)) < 0)
            goto end;
    }

    if (!(avf2->oformat->flags & AVFMT_NOFILE)) {
        if ((ret = avio_open(&avf2->pb, filename, AVIO_FLAG_WRITE)) < 0) {
            av_log(avf, AV_LOG_ERROR, "Slave '%s': error opening: %s\n",
                   slave, av_err2str(ret));
            goto end;
        }
    }

    if ((ret = avformat_write_header(avf2, &options)) < 0) {
        av_log(avf, AV_LOG_ERROR, "Slave '%s': error writing header: %s\n",
               slave, av_err2str(ret));
        goto end;
    }

    tee_slave->avf = avf2;
    /* Fix: allocate an array of bsf context pointers, not of TeeSlave --
     * the original element size was wrong (harmlessly oversized, but
     * incorrect and wasteful). */
    tee_slave->bsfs = av_calloc(avf2->nb_streams, sizeof(*tee_slave->bsfs));
    if (!tee_slave->bsfs) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    entry = NULL;
    /* Options named "bsfs<sep><spec>" attach bitstream filter chains to
     * the streams matching <spec>. */
    while ((entry = av_dict_get(options, "bsfs", NULL, AV_DICT_IGNORE_SUFFIX))) {
        const char *spec = entry->key + strlen("bsfs");
        if (*spec) {
            if (strspn(spec, slave_bsfs_spec_sep) != 1) {
                av_log(avf, AV_LOG_ERROR,
                       "Specifier separator in '%s' is '%c', but only characters '%s' "
                       "are allowed\n", entry->key, *spec, slave_bsfs_spec_sep);
                /* Fix: go through the cleanup path instead of returning
                 * directly and leaking options/format/select. */
                ret = AVERROR(EINVAL);
                goto end;
            }
            spec++; /* consume separator */
        }

        for (i = 0; i < avf2->nb_streams; i++) {
            ret = avformat_match_stream_specifier(avf2, avf2->streams[i], spec);
            if (ret < 0) {
                av_log(avf, AV_LOG_ERROR,
                       "Invalid stream specifier '%s' in bsfs option '%s' for slave "
                       "output '%s'\n", spec, entry->key, filename);
                goto end;
            }

            if (ret > 0) {
                av_log(avf, AV_LOG_DEBUG, "spec:%s bsfs:%s matches stream %d of slave "
                       "output '%s'\n", spec, entry->value, i, filename);
                if (tee_slave->bsfs[i]) {
                    av_log(avf, AV_LOG_WARNING,
                           "Duplicate bsfs specification associated to stream %d of slave "
                           "output '%s', filters will be ignored\n", i, filename);
                    continue;
                }
                ret = parse_bsfs(avf, entry->value, &tee_slave->bsfs[i]);
                if (ret < 0) {
                    av_log(avf, AV_LOG_ERROR,
                           "Error parsing bitstream filter sequence '%s' associated to "
                           "stream %d of slave output '%s'\n", entry->value, i, filename);
                    goto end;
                }
            }
        }

        av_dict_set(&options, entry->key, NULL, 0);
    }

    /* Anything left in the dictionary is an option nobody consumed */
    if (options) {
        entry = NULL;
        while ((entry = av_dict_get(options, "", entry, AV_DICT_IGNORE_SUFFIX)))
            av_log(avf2, AV_LOG_ERROR, "Unknown option '%s'\n", entry->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto end;
    }

end:
    av_free(format);
    av_free(select);
    av_dict_free(&options);
    return ret;
}
/*
 * Initialize the libx265 (HEVC) encoder wrapper: allocate and fill the
 * x265 parameter set from the AVCodecContext, apply user "x265-params"
 * options, open the encoder and fetch the VPS/SPS/PPS headers.
 * Returns 0 on success or a negative AVERROR code.
 */
static av_cold int libx265_encode_init(AVCodecContext *avctx)
{
    libx265Context *ctx = avctx->priv_data;
    x265_nal *nal;
    uint8_t *buf;
    int sar_num, sar_den;
    int nnal;
    int ret;
    int i;

    /* 4:4:4 (no chroma subsampling in either direction) is only allowed
     * when the user explicitly opts into experimental compliance. */
    if (avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL &&
        !av_pix_fmt_desc_get(avctx->pix_fmt)->log2_chroma_w &&
        !av_pix_fmt_desc_get(avctx->pix_fmt)->log2_chroma_h) {
        av_log(avctx, AV_LOG_ERROR,
               "4:4:4 support is not fully defined for HEVC yet. "
               "Set -strict experimental to encode anyway.\n");
        return AVERROR(ENOSYS);
    }

    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
        return AVERROR(ENOMEM);
    }

    ctx->params = x265_param_alloc();
    if (!ctx->params) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate x265 param structure.\n");
        return AVERROR(ENOMEM);
    }

    /* Start from a named preset/tune combination (like x265's CLI) */
    if (x265_param_default_preset(ctx->params, ctx->preset, ctx->tune) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid preset or tune.\n");
        return AVERROR(EINVAL);
    }

    ctx->params->frameNumThreads = avctx->thread_count;
    /* Frame rate = time_base inverted, scaled by ticks_per_frame */
    ctx->params->fpsNum          = avctx->time_base.den;
    ctx->params->fpsDenom        = avctx->time_base.num * avctx->ticks_per_frame;
    ctx->params->sourceWidth     = avctx->width;
    ctx->params->sourceHeight    = avctx->height;

    /* Signal the sample aspect ratio via VUI using Extended SAR (idc 255) */
    av_reduce(&sar_num, &sar_den,
              avctx->sample_aspect_ratio.num,
              avctx->sample_aspect_ratio.den, 4096);
    ctx->params->vui.bEnableVuiParametersPresentFlag = 1;
    ctx->params->vui.bEnableAspectRatioIdc           = 1;
    ctx->params->vui.aspectRatioIdc                  = 255;
    ctx->params->vui.sarWidth                        = sar_num;
    ctx->params->vui.sarHeight                       = sar_den;

    /* NOTE(review): mapping a reported max bit depth of 12 to an internal
     * depth of 10 looks like a workaround for how x265 builds advertised
     * HIGH_BIT_DEPTH support at the time -- confirm against the x265
     * version this was written for before changing. */
    if (x265_max_bit_depth == 8)
        ctx->params->internalBitDepth = 8;
    else if (x265_max_bit_depth == 12)
        ctx->params->internalBitDepth = 10;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV420P10:
        ctx->params->internalCsp = X265_CSP_I420;
        break;
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUV444P10:
        ctx->params->internalCsp = X265_CSP_I444;
        break;
    }

    /* A positive bit_rate selects ABR rate control (kbps for x265) */
    if (avctx->bit_rate > 0) {
        ctx->params->rc.bitrate         = avctx->bit_rate / 1000;
        ctx->params->rc.rateControlMode = X265_RC_ABR;
    }

    /* Forward raw "key=value:key=value" user options to x265 */
    if (ctx->x265_opts) {
        AVDictionary *dict    = NULL;
        AVDictionaryEntry *en = NULL;

        if (!av_dict_parse_string(&dict, ctx->x265_opts, "=", ":", 0)) {
            while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
                int parse_ret = x265_param_parse(ctx->params, en->key, en->value);

                switch (parse_ret) {
                case X265_PARAM_BAD_NAME:
                    av_log(avctx, AV_LOG_WARNING,
                          "Unknown option: %s.\n", en->key);
                    break;
                case X265_PARAM_BAD_VALUE:
                    av_log(avctx, AV_LOG_WARNING,
                          "Invalid value for %s: %s.\n", en->key, en->value);
                    break;
                default:
                    break;
                }
            }
            av_dict_free(&dict);
        }
    }

    ctx->encoder = x265_encoder_open(ctx->params);
    if (!ctx->encoder) {
        av_log(avctx, AV_LOG_ERROR, "Cannot open libx265 encoder.\n");
        libx265_encode_close(avctx);
        return AVERROR_INVALIDDATA;
    }

    /* Fetch the parameter-set NALs (VPS/SPS/PPS) produced by the encoder */
    ret = x265_encoder_headers(ctx->encoder, &nal, &nnal);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Cannot encode headers.\n");
        libx265_encode_close(avctx);
        return AVERROR_INVALIDDATA;
    }

    for (i = 0; i < nnal; i++)
        ctx->header_size += nal[i].sizeBytes;

    /* Concatenate all header NALs into one padded buffer */
    ctx->header = av_malloc(ctx->header_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!ctx->header) {
        av_log(avctx, AV_LOG_ERROR,
               "Cannot allocate HEVC header of size %d.\n", ctx->header_size);
        libx265_encode_close(avctx);
        return AVERROR(ENOMEM);
    }

    buf = ctx->header;
    for (i = 0; i < nnal; i++) {
        memcpy(buf, nal[i].payload, nal[i].sizeBytes);
        buf += nal[i].sizeBytes;
    }

    /* With GLOBAL_HEADER the header buffer's ownership is handed over to
     * avctx->extradata; clear our copy so close() doesn't double-free. */
    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
        avctx->extradata_size = ctx->header_size;
        avctx->extradata = ctx->header;
        ctx->header_size = 0;
        ctx->header = NULL;
    }

    return 0;
}
/**
 * Enumerate capture devices for the current default input format using
 * avdevice's generic device listing API.
 *
 * @return A list of (device name, device description) pairs; empty on
 *         any failure.
 */
QVector<QPair<QString, QString>> CameraDevice::getRawDeviceListGeneric()
{
    QVector<QPair<QString, QString>> result;

    if (!getDefaultInputFormat())
        return result;

    // Build a throwaway input context so avdevice can query the format
    AVFormatContext* fmtCtx = avformat_alloc_context();
    if (!fmtCtx)
        return result;

    // Only actual input devices can be enumerated
    if (!iformat->priv_class || !AV_IS_INPUT_DEVICE(iformat->priv_class->category)) {
        avformat_free_context(fmtCtx);
        return result;
    }

    fmtCtx->iformat = iformat;
    if (fmtCtx->iformat->priv_data_size > 0) {
        // The format expects private data; allocate and default-initialize it
        fmtCtx->priv_data = av_mallocz(fmtCtx->iformat->priv_data_size);
        if (!fmtCtx->priv_data) {
            avformat_free_context(fmtCtx);
            return result;
        }
        if (fmtCtx->iformat->priv_class) {
            *(const AVClass**) fmtCtx->priv_data = fmtCtx->iformat->priv_class;
            av_opt_set_defaults(fmtCtx->priv_data);
        }
    } else {
        fmtCtx->priv_data = NULL;
    }

    // Ask the device backend for its device list
    AVDeviceInfoList* deviceList = nullptr;
    AVDictionary* optionsDict = nullptr;
    av_dict_copy(&optionsDict, nullptr, 0);
    if (av_opt_set_dict2(fmtCtx, &optionsDict, AV_OPT_SEARCH_CHILDREN) < 0) {
        av_dict_free(&optionsDict);
        avformat_free_context(fmtCtx);
        return result;
    }
    avdevice_list_devices(fmtCtx, &deviceList);
    av_dict_free(&optionsDict);
    avformat_free_context(fmtCtx);

    if (!deviceList) {
        qWarning() << "avdevice_list_devices failed";
        return result;
    }

    // Copy the entries into Qt containers before releasing the list
    result.resize(deviceList->nb_devices);
    for (int idx = 0; idx < deviceList->nb_devices; ++idx) {
        const AVDeviceInfo* info = deviceList->devices[idx];
        result[idx].first = info->device_name;
        result[idx].second = info->device_description;
    }
    avdevice_free_list_devices(&deviceList);

    return result;
}
/*
 * Open and configure the output audio encoder for DASH packaging.
 *
 * Allocates the sample FIFO, the reusable output AVFrame and the raw sample
 * buffer, looks up the encoder named in audio_data_conf->codec, fills the
 * codec context (bitrate, sample rate, channels, sample format), opens the
 * codec with "strict experimental" enabled, and pre-wires the AVFrame to the
 * sample buffer via avcodec_fill_audio_frame().
 *
 * Returns 0 on success, -1 on failure.
 *
 * NOTE(review): av_fifo_alloc(), FF_ALLOC_FRAME() and av_malloc() results are
 * not NULL-checked, and the early error returns below leak them — TODO confirm
 * whether the caller frees these via a close routine on failure.
 */
int dc_audio_encoder_open(AudioOutputFile *audio_output_file, AudioDataConf *audio_data_conf)
{
	AVDictionary *opts = NULL;

	/* FIFO buffers incoming samples; sized for two worst-case packets. */
	audio_output_file->fifo = av_fifo_alloc(2 * MAX_AUDIO_PACKET_SIZE);
	audio_output_file->aframe = FF_ALLOC_FRAME();
	audio_output_file->adata_buf = (uint8_t*) av_malloc(2 * MAX_AUDIO_PACKET_SIZE);
	/* Mark frame fields as "unset" so they are filled from the codec ctx later;
	 * which fields exist depends on the libav/ffmpeg flavor being built against. */
#ifndef GPAC_USE_LIBAV
	audio_output_file->aframe->channels = -1;
#endif
#ifndef LIBAV_FRAME_OLD
	audio_output_file->aframe->channel_layout = 0;
	audio_output_file->aframe->sample_rate = -1;
#endif
	audio_output_file->aframe->format = -1;

	audio_output_file->codec = avcodec_find_encoder_by_name(audio_data_conf->codec);
	if (audio_output_file->codec == NULL) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Output audio codec not found\n"));
		return -1;
	}

	audio_output_file->codec_ctx = avcodec_alloc_context3(audio_output_file->codec);
	audio_output_file->codec_ctx->codec_id = audio_output_file->codec->id;
	audio_output_file->codec_ctx->codec_type = AVMEDIA_TYPE_AUDIO;
	audio_output_file->codec_ctx->bit_rate = audio_data_conf->bitrate;
	/* Sample rate is forced to the fixed pipeline rate, not the configured one. */
	audio_output_file->codec_ctx->sample_rate = DC_AUDIO_SAMPLE_RATE /*audio_data_conf->samplerate*/;
	{
		/* Audio time base: one tick per sample. */
		AVRational time_base;
		time_base.num = 1;
		time_base.den = audio_output_file->codec_ctx->sample_rate;
		audio_output_file->codec_ctx->time_base = time_base;
	}
	audio_output_file->codec_ctx->channels = audio_data_conf->channels;
	audio_output_file->codec_ctx->channel_layout = AV_CH_LAYOUT_STEREO; /*FIXME: depends on channels -> http://ffmpeg.org/doxygen/trunk/channel__layout_8c_source.html#l00074*/
	/* Use the encoder's first (preferred) supported sample format. */
	audio_output_file->codec_ctx->sample_fmt = audio_output_file->codec->sample_fmts[0];
#ifdef DC_AUDIO_RESAMPLER
	audio_output_file->aresampler = NULL;
#endif
	/* Pass user-supplied private codec options straight to priv_data. */
	if (audio_data_conf->custom) {
		build_dict(audio_output_file->codec_ctx->priv_data, audio_data_conf->custom);
	}
	audio_output_file->astream_idx = 0;

	/* open the audio codec — "strict experimental" allows e.g. the native AAC encoder */
	av_dict_set(&opts, "strict", "experimental", 0);
	if (avcodec_open2(audio_output_file->codec_ctx, audio_output_file->codec, &opts) < 0) {
		/*FIXME: if we enter here (set "mp2" as a codec and "200000" as a bitrate -> deadlock*/
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot open output audio codec\n"));
		av_dict_free(&opts);
		return -1;
	}
	av_dict_free(&opts);

	/* Bytes per encoder frame, in the fixed pipeline sample format/channels. */
	audio_output_file->frame_bytes = audio_output_file->codec_ctx->frame_size * av_get_bytes_per_sample(DC_AUDIO_SAMPLE_FORMAT) * DC_AUDIO_NUM_CHANNELS;

	/* Reset the frame before wiring it to the buffer; API differs by version.
	 * NOTE(review): the #ifndef polarity looks suspicious (FF_API_AVFRAME_LAVC
	 * defined usually means the *old* API is still present) — TODO confirm. */
#ifndef FF_API_AVFRAME_LAVC
	avcodec_get_frame_defaults(audio_output_file->aframe);
#else
	av_frame_unref(audio_output_file->aframe);
#endif
	audio_output_file->aframe->nb_samples = audio_output_file->codec_ctx->frame_size;

	/* Point the frame's data planes into adata_buf so encode can reuse it. */
	if (avcodec_fill_audio_frame(audio_output_file->aframe, audio_output_file->codec_ctx->channels, audio_output_file->codec_ctx->sample_fmt, audio_output_file->adata_buf, audio_output_file->codec_ctx->frame_size * av_get_bytes_per_sample(audio_output_file->codec_ctx->sample_fmt) * audio_output_file->codec_ctx->channels, 1) < 0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Fill audio frame failed\n"));
		return -1;
	}

	//audio_output_file->acc_samples = 0;
	return 0;
}
/**
 * Open the local audio file with FFmpeg and prepare decoding.
 *
 * Opens the container, finds the first audio stream, opens its decoder,
 * creates a resampler to float samples, and publishes channel count,
 * sample rate and frame count to the base class.
 *
 * Returns OK on success, ERR on any failure.
 *
 * NOTE(review): the ERR paths after avformat_open_input() succeeded do not
 * close m_pFormatCtx / m_pCodecCtx here — presumably the caller's close()
 * path releases them; verify against the class teardown.
 */
Result SoundSourceFFmpeg::tryOpen(const AudioSourceConfig& /*audioSrcCfg*/) {
    unsigned int i;
    AVDictionary *l_iFormatOpts = NULL;

    const QByteArray qBAFilename(getLocalFileNameBytes());

    qDebug() << "New SoundSourceFFmpeg :" << qBAFilename;

    DEBUG_ASSERT(!m_pFormatCtx);
    m_pFormatCtx = avformat_alloc_context();

    // Older libavcodec: raise the probe budget so stream info is found reliably.
#if LIBAVCODEC_VERSION_INT < 3622144
    m_pFormatCtx->max_analyze_duration = 999999999;
#endif

    // Open file and make m_pFormatCtx
    if (avformat_open_input(&m_pFormatCtx, qBAFilename.constData(), NULL, &l_iFormatOpts)!=0) {
        qDebug() << "av_open_input_file: cannot open" << qBAFilename;
        return ERR;
    }

    // Free the (unused) options dict; guarded for old libav versions.
#if LIBAVCODEC_VERSION_INT > 3544932
    av_dict_free(&l_iFormatOpts);
#endif

    // Retrieve stream information
    if (avformat_find_stream_info(m_pFormatCtx, NULL)<0) {
        qDebug() << "av_find_stream_info: cannot open" << qBAFilename;
        return ERR;
    }

    //debug only (Enable if needed)
    //av_dump_format(m_pFormatCtx, 0, qBAFilename.constData(), false);

    // Find the first audio stream
    m_iAudioStream=-1;

    for (i=0; i<m_pFormatCtx->nb_streams; i++)
        if (m_pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO) {
            m_iAudioStream=i;
            break;
        }
    if (m_iAudioStream == -1) {
        qDebug() << "ffmpeg: cannot find an audio stream: cannot open" << qBAFilename;
        return ERR;
    }

    // Get a pointer to the codec context for the audio stream
    m_pCodecCtx=m_pFormatCtx->streams[m_iAudioStream]->codec;

    // Find the decoder for the audio stream
    if (!(m_pCodec=avcodec_find_decoder(m_pCodecCtx->codec_id))) {
        qDebug() << "ffmpeg: cannot find a decoder for" << qBAFilename;
        return ERR;
    }

    if (avcodec_open2(m_pCodecCtx, m_pCodec, NULL)<0) {
        qDebug() << "ffmpeg: cannot open" << qBAFilename;
        return ERR;
    }

    // Convert whatever the decoder outputs to interleaved float samples.
    m_pResample = new EncoderFfmpegResample(m_pCodecCtx);
    m_pResample->open(m_pCodecCtx->sample_fmt, AV_SAMPLE_FMT_FLT);

    setChannelCount(m_pCodecCtx->channels);
    setFrameRate(m_pCodecCtx->sample_rate);
    // duration is in AV_TIME_BASE units; convert to sample frames.
    setFrameCount((m_pFormatCtx->duration * m_pCodecCtx->sample_rate) / AV_TIME_BASE);

    qDebug() << "ffmpeg: Samplerate: " << getFrameRate() << ", Channels: " << getChannelCount() << "\n";
    // Only mono/stereo is supported by the playback path.
    if (getChannelCount() > 2) {
        qDebug() << "ffmpeg: No support for more than 2 channels!";
        return ERR;
    }

    return OK;
}
/*
 * Parse an HLS (M3U8) playlist.
 *
 * Handles both master playlists (#EXT-X-STREAM-INF variant entries) and media
 * playlists (#EXTINF segment entries), including AES-128 key/IV declarations.
 *
 * c    - demuxer context (HTTP options, variant list)
 * url  - playlist URL; used as base for resolving relative segment/key URLs
 * var  - variant to (re)populate, or NULL; a variant is created on demand when
 *        media-playlist tags appear without one
 * in   - already-open AVIO stream, or NULL to open `url` here (then it is
 *        closed before returning)
 *
 * Returns 0 on success or a negative AVERROR code.
 */
static int parse_playlist(HLSContext *c, const char *url, struct variant *var, AVIOContext *in)
{
    int ret = 0, duration = 0, is_segment = 0, is_variant = 0, bandwidth = 0;
    enum KeyType key_type = KEY_NONE;
    uint8_t iv[16] = "";
    int has_iv = 0;
    char key[MAX_URL_SIZE] = "";
    char line[1024];
    const char *ptr;
    int close_in = 0;      /* nonzero when we opened `in` ourselves */

    if (!in) {
        AVDictionary *opts = NULL;
        close_in = 1;
        /* Some HLS servers dont like being sent the range header */
        av_dict_set(&opts, "seekable", "0", 0);
        // broker prior HTTP options that should be consistent across requests
        av_dict_set(&opts, "user-agent", c->user_agent, 0);
        av_dict_set(&opts, "cookies", c->cookies, 0);
        ret = avio_open2(&in, url, AVIO_FLAG_READ, c->interrupt_callback, &opts);
        av_dict_free(&opts);
        if (ret < 0)
            return ret;
    }

    /* First line must be the playlist magic. */
    read_chomp_line(in, line, sizeof(line));
    if (strcmp(line, "#EXTM3U")) {
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    /* Reloading an existing variant: drop its old segment list. */
    if (var) {
        free_segment_list(var);
        var->finished = 0;
    }
    while (!url_feof(in)) {
        read_chomp_line(in, line, sizeof(line));
        if (av_strstart(line, "#EXT-X-STREAM-INF:", &ptr)) {
            /* Master playlist: the next plain line is a variant URL. */
            struct variant_info info = {{0}};
            is_variant = 1;
            ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_variant_args, &info);
            bandwidth = atoi(info.bandwidth);
        } else if (av_strstart(line, "#EXT-X-KEY:", &ptr)) {
            /* Encryption declaration; applies to following segments. */
            struct key_info info = {{0}};
            ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_key_args, &info);
            key_type = KEY_NONE;
            has_iv = 0;
            if (!strcmp(info.method, "AES-128"))
                key_type = KEY_AES_128;
            if (!strncmp(info.iv, "0x", 2) || !strncmp(info.iv, "0X", 2)) {
                ff_hex_to_data(iv, info.iv + 2);
                has_iv = 1;
            }
            av_strlcpy(key, info.uri, sizeof(key));
        } else if (av_strstart(line, "#EXT-X-TARGETDURATION:", &ptr)) {
            if (!var) {
                var = new_variant(c, 0, url, NULL);
                if (!var) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
            }
            var->target_duration = atoi(ptr);
        } else if (av_strstart(line, "#EXT-X-MEDIA-SEQUENCE:", &ptr)) {
            if (!var) {
                var = new_variant(c, 0, url, NULL);
                if (!var) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
            }
            var->start_seq_no = atoi(ptr);
        } else if (av_strstart(line, "#EXT-X-ENDLIST", &ptr)) {
            /* VOD playlist: no further reloads needed. */
            if (var)
                var->finished = 1;
        } else if (av_strstart(line, "#EXTINF:", &ptr)) {
            /* Segment duration; the next plain line is the segment URL. */
            is_segment = 1;
            duration = atoi(ptr);
        } else if (av_strstart(line, "#", NULL)) {
            continue;   /* unknown/comment tag */
        } else if (line[0]) {
            /* Plain (non-tag) line: variant URL or segment URL. */
            if (is_variant) {
                if (!new_variant(c, bandwidth, line, url)) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                is_variant = 0;
                bandwidth = 0;
            }
            if (is_segment) {
                struct segment *seg;
                if (!var) {
                    var = new_variant(c, 0, url, NULL);
                    if (!var) {
                        ret = AVERROR(ENOMEM);
                        goto fail;
                    }
                }
                seg = av_malloc(sizeof(struct segment));
                if (!seg) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                seg->duration = duration;
                seg->key_type = key_type;
                if (has_iv) {
                    memcpy(seg->iv, iv, sizeof(iv));
                } else {
                    /* Per RFC 8216: default IV is the segment sequence number,
                     * big-endian, in the low bytes of a zeroed 16-byte IV. */
                    int seq = var->start_seq_no + var->n_segments;
                    memset(seg->iv, 0, sizeof(seg->iv));
                    AV_WB32(seg->iv + 12, seq);
                }
                ff_make_absolute_url(seg->key, sizeof(seg->key), url, key);
                ff_make_absolute_url(seg->url, sizeof(seg->url), url, line);
                dynarray_add(&var->segments, &var->n_segments, seg);
                is_segment = 0;
            }
        }
    }
    /* Record when this playlist was fetched, for live-reload pacing. */
    if (var)
        var->last_load_time = av_gettime();

fail:
    if (close_in)
        avio_close(in);
    return ret;
}
/*
 * Open the URL for the current segment of a variant.
 *
 * Plain segments are opened directly; AES-128 segments are opened through the
 * "crypto" protocol wrapper, fetching (and caching) the key file when the key
 * URL changed since the last segment.
 *
 * On success var->input is the open URLContext and 0 is returned; otherwise a
 * negative AVERROR code is returned.
 */
static int open_input(HLSContext *c, struct variant *var)
{
    AVDictionary *opts = NULL;
    int ret;
    struct segment *seg = var->segments[var->cur_seq_no - var->start_seq_no];

    // broker prior HTTP options that should be consistent across requests
    av_dict_set(&opts, "user-agent", c->user_agent, 0);
    av_dict_set(&opts, "cookies", c->cookies, 0);
    av_dict_set(&opts, "seekable", "0", 0);
    if (seg->key_type == KEY_NONE) {
        ret = ffurl_open(&var->input, seg->url, AVIO_FLAG_READ, &var->parent->interrupt_callback, &opts);
        goto cleanup;
    } else if (seg->key_type == KEY_AES_128) {
        char iv[33], key[33], url[MAX_URL_SIZE];
        /* Fetch the key only when it differs from the cached one. */
        if (strcmp(seg->key, var->key_url)) {
            URLContext *uc;
            if (ffurl_open(&uc, seg->key, AVIO_FLAG_READ, &var->parent->interrupt_callback, &opts) == 0) {
                if (ffurl_read_complete(uc, var->key, sizeof(var->key)) != sizeof(var->key)) {
                    av_log(NULL, AV_LOG_ERROR, "Unable to read key file %s\n", seg->key);
                }
                ffurl_close(uc);
            } else {
                av_log(NULL, AV_LOG_ERROR, "Unable to open key file %s\n", seg->key);
            }
            av_strlcpy(var->key_url, seg->key, sizeof(var->key_url));
        }
        /* crypto protocol takes key/IV as hex strings (16 bytes -> 32 chars). */
        ff_data_to_hex(iv, seg->iv, sizeof(seg->iv), 0);
        ff_data_to_hex(key, var->key, sizeof(var->key), 0);
        iv[32] = key[32] = '\0';
        /* "crypto+scheme://..." for nested protocols, "crypto:..." otherwise. */
        if (strstr(seg->url, "://"))
            snprintf(url, sizeof(url), "crypto+%s", seg->url);
        else
            snprintf(url, sizeof(url), "crypto:%s", seg->url);
        if ((ret = ffurl_alloc(&var->input, url, AVIO_FLAG_READ, &var->parent->interrupt_callback)) < 0)
            goto cleanup;
        av_opt_set(var->input->priv_data, "key", key, 0);
        av_opt_set(var->input->priv_data, "iv", iv, 0);
        /* Need to repopulate options */
        av_dict_free(&opts);
        av_dict_set(&opts, "seekable", "0", 0);
        if ((ret = ffurl_connect(var->input, &opts)) < 0) {
            ffurl_close(var->input);
            var->input = NULL;
            goto cleanup;
        }
        ret = 0;
    } else
        ret = AVERROR(ENOSYS);   /* unknown key type */

cleanup:
    av_dict_free(&opts);
    return ret;
}
/*
 * Create a decode context: open a media file (or a DirectShow capture device
 * when the filename starts with "video=") and prepare its audio/video
 * decoders.  (Original comment was mojibake Chinese; intent reconstructed
 * from the code — verify against the project docs.)
 *
 * filename - path/URL, or "video=<device>" for a capture device
 * opt_arg  - options forwarded to avformat_open_input (not consumed)
 *
 * Returns a new AVDecodeCtx on success, NULL on failure (partial state is
 * released via ffCloseDecodeContext).
 */
AVDecodeCtx *ffCreateDecodeContext( const char * filename, AVDictionary *opt_arg )
{
	int i, ret;
	AVInputFormat *file_iformat = NULL;
	AVDecodeCtx * pdc;
	AVDictionary * opt = NULL;

	ffInit();

	pdc = (AVDecodeCtx *)malloc(sizeof(AVDecodeCtx));
	/* NOTE(review): `while (pdc)` is used as a one-shot block so `break`
	 * can jump to the shared failure cleanup below (a goto substitute);
	 * it also skips everything when malloc returned NULL. */
	while (pdc)
	{
		memset(pdc, 0, sizeof(AVDecodeCtx));
		pdc->_fileName = strdup(filename);
		pdc->_ctx = avformat_alloc_context();
		if (!pdc->_ctx)
		{
			av_log(NULL, AV_LOG_FATAL, "ffCreateDecodeContext : could not allocate context.\n");
			break;
		}
		//filename = "video=.." ,open dshow device
		if (filename && strstr(filename, "video=") == filename){
			file_iformat = av_find_input_format(CAP_DEVICE_NAME);
			if (!file_iformat){
				av_log(NULL, AV_LOG_FATAL, "Unknown input format: '%s'\n",CAP_DEVICE_NAME);
				break;
			}
		}
		/* Work on a copy so the caller's dictionary is left untouched. */
		av_dict_copy(&opt, opt_arg, 0);
		ret = avformat_open_input(&pdc->_ctx, filename, file_iformat, &opt);
		av_dict_free(&opt);
		opt = NULL;
		if (ret < 0)
		{
			char errmsg[ERROR_BUFFER_SIZE];
			av_strerror(ret, errmsg, ERROR_BUFFER_SIZE);
			av_log(NULL, AV_LOG_FATAL, "ffCreateDecodeContext %s.\n", errmsg);
			break;
		}
		av_format_inject_global_side_data(pdc->_ctx);
		/* NOTE(review): this second copy is never passed to
		 * avformat_find_stream_info (NULL is) — the copy/free pair is a no-op. */
		av_dict_copy(&opt, opt_arg, 0);
		ret = avformat_find_stream_info(pdc->_ctx, NULL);
		av_dict_free(&opt);
		opt = NULL;
		if (ret < 0)
		{
			char errmsg[ERROR_BUFFER_SIZE];
			av_strerror(ret, errmsg, ERROR_BUFFER_SIZE);
			av_log(NULL, AV_LOG_FATAL, "ffCreateDecodeContext %s.\n", errmsg);
			break;
		}
		/*
		 * Locate the best video and audio streams.
		 * (Original comment was mojibake Chinese.)
		 */
		ret = av_find_best_stream(pdc->_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
		if (ret >= 0)
		{
			pdc->has_video = 1;
			pdc->_video_st = pdc->_ctx->streams[ret];
			pdc->_video_st_index = ret;
		}
		ret = av_find_best_stream(pdc->_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
		if (ret >= 0)
		{
			pdc->has_audio = 1;
			pdc->_audio_st = pdc->_ctx->streams[ret];
			pdc->_audio_st_index = ret;
		}
		if (pdc->has_video)
		{
			if (open_video(pdc, pdc->_video_st->codec->codec_id, NULL) < 0)
			{
				ffCloseDecodeContext(pdc);
				return NULL;
			}
			pdc->encode_video = 1;
		}
		if (pdc->has_audio)
		{
			if (open_audio(pdc, pdc->_audio_st->codec->codec_id, NULL) < 0)
			{
				ffCloseDecodeContext(pdc);
				return NULL;
			}
			pdc->encode_audio = 1;
		}
		return pdc;
	}
	/*
	 * Failure cleanup (original comment was mojibake Chinese).
	 */
	ffCloseDecodeContext(pdc);
	return NULL;
}
/**
 * Open a media source (network URL or local file) for demuxing.
 *
 * Network URLs (mms/http/rtmp/udp/rtsp, shout:// rewritten to http://) are
 * opened directly with avformat_open_input; local files go through a custom
 * AVIO context backed by the File object so caching/seek control applies.
 * Afterwards the streams are probed (unless doSkipAvProbe) and registered.
 *
 * Returns true on success; on any failure close() is called and false returned.
 */
bool OMXReader::open(std::string filename, bool doSkipAvProbe)
{
	currentPTS = DVD_NOPTS_VALUE;
	fileName = filename;
	speed = DVD_PLAYSPEED_NORMAL;
	programID = UINT_MAX;
	AVIOInterruptCB int_cb = { interrupt_cb, NULL };
	ClearStreams();

	int result = -1;
	AVInputFormat *iformat = NULL;
	unsigned char *buffer = NULL;
	unsigned int flags = READ_TRUNCATED | READ_BITRATE | READ_CHUNKED;

	// shoutcast streams are plain http underneath
	if(fileName.substr(0, 8) == "shout://" )
		fileName.replace(0, 8, "http://");

	if(fileName.substr(0,6) == "mms://" || fileName.substr(0,7) == "mmsh://" || fileName.substr(0,7) == "mmst://" || fileName.substr(0,7) == "mmsu://" ||
	   fileName.substr(0,7) == "http://" || fileName.substr(0,7) == "rtmp://" || fileName.substr(0,6) == "udp://" ||
	   fileName.substr(0,7) == "rtsp://" )
	{
		// Network path: let FFmpeg handle the protocol; probing is skipped.
		doSkipAvProbe = false;
		// ffmpeg dislikes the useragent from AirPlay urls
		//int idx = fileName.Find("|User-Agent=AppleCoreMedia");
		size_t idx = fileName.find("|");
		if(idx != string::npos)
			fileName = fileName.substr(0, idx);

		AVDictionary *d = NULL;
		// Enable seeking if http
		if(fileName.substr(0,7) == "http://")
		{
			av_dict_set(&d, "seekable", "1", 0);
		}
		ofLog(OF_LOG_VERBOSE, "OMXPlayer::OpenFile - avformat_open_input %s ", fileName.c_str());
		result = avformat_open_input(&avFormatContext, fileName.c_str(), iformat, &d);
		// An empty dict means all options were consumed, i.e. "seekable" was
		// accepted by the protocol — NOTE(review): this also triggers when no
		// option was set at all; confirm intent.
		if(av_dict_count(d) == 0)
		{
			ofLog(OF_LOG_VERBOSE, "OMXPlayer::OpenFile - avformat_open_input enabled SEEKING ");
			if(fileName.substr(0,7) == "http://")
				avFormatContext->pb->seekable = AVIO_SEEKABLE_NORMAL;
		}
		av_dict_free(&d);
		if(result < 0)
		{
			ofLog(OF_LOG_ERROR, "OMXPlayer::OpenFile - avformat_open_input %s ", fileName.c_str());
			close();
			return false;
		}
	}
	else
	{
		// Local-file path: custom AVIO context over our File wrapper.
		fileObject = new File();
		if (!fileObject->open(fileName, flags))
		{
			ofLog(OF_LOG_ERROR, "OMXPlayer::OpenFile - %s ", fileName.c_str());
			close();
			return false;
		}

		buffer = (unsigned char*)av_malloc(FFMPEG_FILE_BUFFER_SIZE);
		avioContext = avio_alloc_context(buffer, FFMPEG_FILE_BUFFER_SIZE, 0, fileObject, read_callback, NULL, seek_callback);
		// Round max_packet_size down to a whole multiple of the buffer size.
		avioContext->max_packet_size = 6144;
		if(avioContext->max_packet_size)
			avioContext->max_packet_size *= FFMPEG_FILE_BUFFER_SIZE / avioContext->max_packet_size;

		if(fileObject->IoControl(IOCTRL_SEEK_POSSIBLE, NULL) == 0)
			avioContext->seekable = 0;

		// Probe the container format from the buffered data.
		av_probe_input_buffer(avioContext, &iformat, fileName.c_str(), NULL, 0, 0);
		if(!iformat)
		{
			ofLog(OF_LOG_ERROR, "OMXPlayer::OpenFile - av_probe_input_buffer %s ", fileName.c_str());
			close();
			return false;
		}
		//#warning experimental
		//iformat->flags |= AVFMT_SEEK_TO_PTS;
		avFormatContext = avformat_alloc_context();
		avFormatContext->pb = avioContext;
		result = avformat_open_input(&avFormatContext, fileName.c_str(), iformat, NULL);
		if(result < 0)
		{
			close();
			return false;
		}
	}

	// set the interrupt callback, appeared in libavformat 53.15.0
	avFormatContext->interrupt_callback = int_cb;

	isMatroska = strncmp(avFormatContext->iformat->name, "matroska", 8) == 0; // for "matroska.webm"
	isAVI = strcmp(avFormatContext->iformat->name, "avi") == 0;

	// if format can be nonblocking, let's use that
	avFormatContext->flags |= AVFMT_FLAG_NONBLOCK;

	// analyse very short to speed up mjpeg playback start
	if (iformat && (strcmp(iformat->name, "mjpeg") == 0) && avioContext->seekable == 0)
		avFormatContext->max_analyze_duration = 500000;

	if(/*isAVI || */isMatroska)
		avFormatContext->max_analyze_duration = 0;

	if(!doSkipAvProbe)
	{
		unsigned long long startTime = ofGetElapsedTimeMillis();
		result = avformat_find_stream_info(avFormatContext, NULL);
		unsigned long long endTime = ofGetElapsedTimeMillis();
		ofLogNotice(__func__) << "avformat_find_stream_info TOOK " << endTime-startTime << " MS";
		if(result < 0)
		{
			close();
			return false;
		}
	}

	if(!getStreams())
	{
		close();
		return false;
	}

	// Derive a cache throttle rate from file size / duration (+1 Mbit headroom).
	if(fileObject)
	{
		int64_t len = fileObject->GetLength();
		int64_t tim = getStreamLength();

		if(len > 0 && tim > 0)
		{
			unsigned rate = len * 1000 / tim;
			unsigned maxrate = rate + 1024 * 1024 / 8;
			if(fileObject->IoControl(IOCTRL_CACHE_SETRATE, &maxrate) >= 0)
				ofLog(OF_LOG_VERBOSE, "OMXPlayer::OpenFile - set cache throttle rate to %u bytes per second", maxrate);
		}
	}

	speed = DVD_PLAYSPEED_NORMAL;
	/* if(dump_format) av_dump_format(avFormatContext, 0, fileName.c_str(), 0);*/

	updateCurrentPTS();
	isOpen = true;
	return true;
}
/*
 * Open the avformat-based demuxer.
 *
 * Probes the input, wraps the VLC stream in an AVIO context, opens it with
 * libavformat, runs stream-info detection (per-stream option dictionaries
 * from "avformat-options"), then creates one VLC elementary stream per
 * usable AVStream, converting codec parameters, extradata (with special
 * handling for Xiph codecs, vobsub, DVB subs, attachments) and chapters.
 *
 * Returns VLC_SUCCESS, VLC_ENOMEM or VLC_EGENERIC; partial state is released
 * through avformat_CloseDemux on failure.
 */
int avformat_OpenDemux( vlc_object_t *p_this )
{
    demux_t *p_demux = (demux_t*)p_this;
    demux_sys_t *p_sys;
    AVInputFormat *fmt = NULL;
    vlc_tick_t i_start_time = VLC_TICK_INVALID;
    bool b_can_seek;
    const char *psz_url;
    int error;

    if( p_demux->psz_filepath )
        psz_url = p_demux->psz_filepath;
    else
        psz_url = p_demux->psz_url;

    if( avformat_ProbeDemux( p_this, &fmt, psz_url ) != VLC_SUCCESS )
        return VLC_EGENERIC;

    vlc_stream_Control( p_demux->s, STREAM_CAN_SEEK, &b_can_seek );

    /* Fill p_demux fields */
    p_demux->pf_demux = Demux;
    p_demux->pf_control = Control;
    p_demux->p_sys = p_sys = malloc( sizeof( demux_sys_t ) );
    if( !p_sys )
        return VLC_ENOMEM;
    p_sys->ic = 0;
    p_sys->fmt = fmt;
    p_sys->tracks = NULL;
    p_sys->i_ssa_order = 0;
    TAB_INIT( p_sys->i_attachments, p_sys->attachments);
    p_sys->p_title = NULL;
    p_sys->i_seekpoint = 0;
    p_sys->i_update = 0;

    /* Create I/O wrapper */
    unsigned char * p_io_buffer = av_malloc( AVFORMAT_IOBUFFER_SIZE );
    if( !p_io_buffer )
    {
        avformat_CloseDemux( p_this );
        return VLC_ENOMEM;
    }

    p_sys->ic = avformat_alloc_context();
    if( !p_sys->ic )
    {
        av_free( p_io_buffer );
        avformat_CloseDemux( p_this );
        return VLC_ENOMEM;
    }

    /* Route all libavformat I/O through the VLC stream via IORead/IOSeek. */
    AVIOContext *pb = p_sys->ic->pb = avio_alloc_context( p_io_buffer, AVFORMAT_IOBUFFER_SIZE, 0, p_demux, IORead, NULL, IOSeek );
    if( !pb )
    {
        av_free( p_io_buffer );
        avformat_CloseDemux( p_this );
        return VLC_ENOMEM;
    }

    p_sys->ic->pb->seekable = b_can_seek ? AVIO_SEEKABLE_NORMAL : 0;
    error = avformat_open_input(&p_sys->ic, psz_url, p_sys->fmt, NULL);
    if( error < 0 )
    {
        msg_Err( p_demux, "Could not open %s: %s", psz_url, vlc_strerror_c(AVUNERROR(error)) );
        /* avformat_open_input freed the context but not our AVIO wrapper */
        av_free( pb->buffer );
        av_free( pb );
        p_sys->ic = NULL;
        avformat_CloseDemux( p_this );
        return VLC_EGENERIC;
    }

    /* One option dictionary per stream (avformat_find_stream_info wants an
     * array); options[0] is parsed from the user, the rest are copies. */
    char *psz_opts = var_InheritString( p_demux, "avformat-options" );
    unsigned nb_streams = p_sys->ic->nb_streams;
    AVDictionary *options[nb_streams ? nb_streams : 1];
    options[0] = NULL;
    for (unsigned i = 1; i < nb_streams; i++)
        options[i] = NULL;
    if (psz_opts) {
        vlc_av_get_options(psz_opts, &options[0]);
        for (unsigned i = 1; i < nb_streams; i++) {
            av_dict_copy(&options[i], options[0], 0);
        }
        free(psz_opts);
    }
    vlc_avcodec_lock(); /* avformat calls avcodec behind our back!!! */
    error = avformat_find_stream_info( p_sys->ic, options );
    vlc_avcodec_unlock();
    /* Options left in the dict were not recognized by libavformat. */
    AVDictionaryEntry *t = NULL;
    while ((t = av_dict_get(options[0], "", t, AV_DICT_IGNORE_SUFFIX))) {
        msg_Err( p_demux, "Unknown option \"%s\"", t->key );
    }
    av_dict_free(&options[0]);
    for (unsigned i = 1; i < nb_streams; i++) {
        av_dict_free(&options[i]);
    }

    nb_streams = p_sys->ic->nb_streams; /* it may have changed */
    if( !nb_streams )
    {
        msg_Err( p_demux, "No streams found");
        avformat_CloseDemux( p_this );
        return VLC_EGENERIC;
    }
    p_sys->tracks = calloc( nb_streams, sizeof(*p_sys->tracks) );
    if( !p_sys->tracks )
    {
        avformat_CloseDemux( p_this );
        return VLC_ENOMEM;
    }
    p_sys->i_tracks = nb_streams;

    /* Stream-info failure is non-fatal: demux with what we have. */
    if( error < 0 )
    {
        msg_Warn( p_demux, "Could not find stream info: %s", vlc_strerror_c(AVUNERROR(error)) );
    }

    /* Create one VLC elementary stream per usable AVStream. */
    for( unsigned i = 0; i < nb_streams; i++ )
    {
        struct avformat_track_s *p_track = &p_sys->tracks[i];
        AVStream *s = p_sys->ic->streams[i];
        const AVCodecParameters *cp = s->codecpar;
        es_format_t es_fmt;
        const char *psz_type = "unknown";

        /* Do not use the cover art as a stream */
        if( s->disposition == AV_DISPOSITION_ATTACHED_PIC )
            continue;

        vlc_fourcc_t fcc = GetVlcFourcc( cp->codec_id );
        switch( cp->codec_type )
        {
        case AVMEDIA_TYPE_AUDIO:
            es_format_Init( &es_fmt, AUDIO_ES, fcc );
            es_fmt.i_original_fourcc = CodecTagToFourcc( cp->codec_tag );
            es_fmt.i_bitrate = cp->bit_rate;
            es_fmt.audio.i_channels = cp->channels;
            es_fmt.audio.i_rate = cp->sample_rate;
            es_fmt.audio.i_bitspersample = cp->bits_per_coded_sample;
            es_fmt.audio.i_blockalign = cp->block_align;
            psz_type = "audio";

            /* LATM/ADTS AAC must go through VLC's packetizer. */
            if(cp->codec_id == AV_CODEC_ID_AAC_LATM)
            {
                es_fmt.i_original_fourcc = VLC_FOURCC('L','A','T','M');
                es_fmt.b_packetized = false;
            }
            else if(cp->codec_id == AV_CODEC_ID_AAC && p_sys->fmt->long_name && strstr(p_sys->fmt->long_name, "raw ADTS AAC"))
            {
                es_fmt.i_original_fourcc = VLC_FOURCC('A','D','T','S');
                es_fmt.b_packetized = false;
            }
            break;

        case AVMEDIA_TYPE_VIDEO:
            es_format_Init( &es_fmt, VIDEO_ES, fcc );
            es_fmt.i_original_fourcc = CodecTagToFourcc( cp->codec_tag );

            es_fmt.video.i_bits_per_pixel = cp->bits_per_coded_sample;
            /* Special case for raw video data */
            if( cp->codec_id == AV_CODEC_ID_RAWVIDEO )
            {
                msg_Dbg( p_demux, "raw video, pixel format: %i", cp->format );
                if( GetVlcChroma( &es_fmt.video, cp->format ) != VLC_SUCCESS)
                {
                    msg_Err( p_demux, "was unable to find a FourCC match for raw video" );
                }
                else
                    es_fmt.i_codec = es_fmt.video.i_chroma;
            }
            /* We need this for the h264 packetizer */
            else if( cp->codec_id == AV_CODEC_ID_H264 && ( p_sys->fmt == av_find_input_format("flv") || p_sys->fmt == av_find_input_format("matroska") || p_sys->fmt == av_find_input_format("mp4") ) )
                es_fmt.i_original_fourcc = VLC_FOURCC( 'a', 'v', 'c', '1' );

            es_fmt.video.i_width = cp->width;
            es_fmt.video.i_height = cp->height;
            es_fmt.video.i_visible_width = es_fmt.video.i_width;
            es_fmt.video.i_visible_height = es_fmt.video.i_height;

            get_rotation(&es_fmt, s);

# warning FIXME: implement palette transmission
            psz_type = "video";

            /* Frame rate: libav needs version-dependent manual computation,
             * ffmpeg provides av_guess_frame_rate(). */
            AVRational rate;
#if (LIBAVUTIL_VERSION_MICRO < 100) /* libav */
# if (LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(55, 20, 0))
            rate.num = s->time_base.num;
            rate.den = s->time_base.den;
# else
            rate.num = s->codec->time_base.num;
            rate.den = s->codec->time_base.den;
# endif
            rate.den *= __MAX( s->codec->ticks_per_frame, 1 );
#else /* ffmpeg */
            rate = av_guess_frame_rate( p_sys->ic, s, NULL );
#endif
            if( rate.den && rate.num )
            {
                es_fmt.video.i_frame_rate = rate.num;
                es_fmt.video.i_frame_rate_base = rate.den;
            }

            AVRational ar;
#if (LIBAVUTIL_VERSION_MICRO < 100) /* libav */
            ar.num = s->sample_aspect_ratio.num;
            ar.den = s->sample_aspect_ratio.den;
#else
            ar = av_guess_sample_aspect_ratio( p_sys->ic, s, NULL );
#endif
            if( ar.num && ar.den )
            {
                es_fmt.video.i_sar_den = ar.den;
                es_fmt.video.i_sar_num = ar.num;
            }
            break;

        case AVMEDIA_TYPE_SUBTITLE:
            es_format_Init( &es_fmt, SPU_ES, fcc );
            es_fmt.i_original_fourcc = CodecTagToFourcc( cp->codec_tag );
            /* Matroska vobsub: frame size and palette are encoded as text
             * ("size:" / "palette:") in the extradata. */
            if( strncmp( p_sys->ic->iformat->name, "matroska", 8 ) == 0 && cp->codec_id == AV_CODEC_ID_DVD_SUBTITLE && cp->extradata != NULL && cp->extradata_size > 0 )
            {
                char *psz_start;
                char *psz_buf = malloc( cp->extradata_size + 1);
                if( psz_buf != NULL )
                {
                    memcpy( psz_buf, cp->extradata , cp->extradata_size );
                    psz_buf[cp->extradata_size] = '\0';

                    psz_start = strstr( psz_buf, "size:" );
                    if( psz_start && vobsub_size_parse( psz_start, &es_fmt.subs.spu.i_original_frame_width, &es_fmt.subs.spu.i_original_frame_height ) == VLC_SUCCESS )
                    {
                        msg_Dbg( p_demux, "original frame size: %dx%d", es_fmt.subs.spu.i_original_frame_width, es_fmt.subs.spu.i_original_frame_height );
                    }
                    else
                    {
                        msg_Warn( p_demux, "reading original frame size failed" );
                    }

                    psz_start = strstr( psz_buf, "palette:" );
                    if( psz_start && vobsub_palette_parse( psz_start, &es_fmt.subs.spu.palette[1] ) == VLC_SUCCESS )
                    {
                        es_fmt.subs.spu.palette[0] = SPU_PALETTE_DEFINED;
                        msg_Dbg( p_demux, "vobsub palette read" );
                    }
                    else
                    {
                        msg_Warn( p_demux, "reading original palette failed" );
                    }
                    free( psz_buf );
                }
            }
            else if( cp->codec_id == AV_CODEC_ID_DVB_SUBTITLE && cp->extradata_size > 3 )
            {
                /* composition-page id (low 16 bits) | ancillary-page id (high) */
                es_fmt.subs.dvb.i_id = GetWBE( cp->extradata ) | (GetWBE( cp->extradata + 2 ) << 16);
            }
            else if( cp->codec_id == AV_CODEC_ID_MOV_TEXT )
            {
                if( cp->extradata_size && (es_fmt.p_extra = malloc(cp->extradata_size)) )
                {
                    memcpy( es_fmt.p_extra, cp->extradata, cp->extradata_size );
                    es_fmt.i_extra = cp->extradata_size;
                }
            }
            psz_type = "subtitle";
            break;

        default:
            es_format_Init( &es_fmt, UNKNOWN_ES, 0 );
            es_fmt.i_original_fourcc = CodecTagToFourcc( cp->codec_tag );
#ifdef HAVE_AVUTIL_CODEC_ATTACHMENT
            /* Font attachments (TTF) are exported to the attachment table. */
            if( cp->codec_type == AVMEDIA_TYPE_ATTACHMENT )
            {
                input_attachment_t *p_attachment;

                psz_type = "attachment";
                if( cp->codec_id == AV_CODEC_ID_TTF )
                {
                    AVDictionaryEntry *filename = av_dict_get( s->metadata, "filename", NULL, 0 );
                    if( filename && filename->value )
                    {
                        p_attachment = vlc_input_attachment_New( filename->value, "application/x-truetype-font", NULL, cp->extradata, (int)cp->extradata_size );
                        if( p_attachment )
                            TAB_APPEND( p_sys->i_attachments, p_sys->attachments, p_attachment );
                    }
                }
                else msg_Warn( p_demux, "unsupported attachment type (%u) in avformat demux", cp->codec_id );
            }
            else
#endif
            {
                if( cp->codec_type == AVMEDIA_TYPE_DATA )
                    psz_type = "data";

                msg_Warn( p_demux, "unsupported track type (%u:%u) in avformat demux", cp->codec_type, cp->codec_id );
            }
            break;
        }

        AVDictionaryEntry *language = av_dict_get( s->metadata, "language", NULL, 0 );
        if ( language && language->value )
            es_fmt.psz_language = strdup( language->value );

        if( s->disposition & AV_DISPOSITION_DEFAULT )
            es_fmt.i_priority = ES_PRIORITY_SELECTABLE_MIN + 1000;

        /* Only real media tracks become elementary streams. */
#ifdef HAVE_AVUTIL_CODEC_ATTACHMENT
        if( cp->codec_type != AVMEDIA_TYPE_ATTACHMENT )
#endif
        if( cp->codec_type != AVMEDIA_TYPE_DATA )
        {
            const bool b_ogg = !strcmp( p_sys->fmt->name, "ogg" );
            const uint8_t *p_extra = cp->extradata;
            unsigned i_extra = cp->extradata_size;

            /* Xiph codecs carry several headers; repack them into VLC's
             * xiph extradata layout (dummy comment header where missing). */
            if( cp->codec_id == AV_CODEC_ID_THEORA && b_ogg )
            {
                unsigned pi_size[3];
                const void *pp_data[3];
                unsigned i_count;
                for( i_count = 0; i_count < 3; i_count++ )
                {
                    if( i_extra < 2 )
                        break;
                    pi_size[i_count] = GetWBE( p_extra );
                    pp_data[i_count] = &p_extra[2];
                    if( i_extra < pi_size[i_count] + 2 )
                        break;
                    p_extra += 2 + pi_size[i_count];
                    i_extra -= 2 + pi_size[i_count];
                }
                if( i_count > 0 && xiph_PackHeaders( &es_fmt.i_extra, &es_fmt.p_extra, pi_size, pp_data, i_count ) )
                {
                    es_fmt.i_extra = 0;
                    es_fmt.p_extra = NULL;
                }
            }
            else if( cp->codec_id == AV_CODEC_ID_SPEEX && b_ogg )
            {
                const uint8_t p_dummy_comment[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
                unsigned pi_size[2];
                const void *pp_data[2];

                pi_size[0] = i_extra;
                pp_data[0] = p_extra;

                pi_size[1] = sizeof(p_dummy_comment);
                pp_data[1] = p_dummy_comment;

                if( pi_size[0] > 0 && xiph_PackHeaders( &es_fmt.i_extra, &es_fmt.p_extra, pi_size, pp_data, 2 ) )
                {
                    es_fmt.i_extra = 0;
                    es_fmt.p_extra = NULL;
                }
            }
            else if( cp->codec_id == AV_CODEC_ID_OPUS )
            {
                const uint8_t p_dummy_comment[] = { 'O', 'p', 'u', 's', 'T', 'a', 'g', 's', 0, 0, 0, 0, /* Vendor String length */ /* Vendor String */ 0, 0, 0, 0, /* User Comment List Length */ };
                unsigned pi_size[2];
                const void *pp_data[2];

                pi_size[0] = i_extra;
                pp_data[0] = p_extra;

                pi_size[1] = sizeof(p_dummy_comment);
                pp_data[1] = p_dummy_comment;

                if( pi_size[0] > 0 && xiph_PackHeaders( &es_fmt.i_extra, &es_fmt.p_extra, pi_size, pp_data, 2 ) )
                {
                    es_fmt.i_extra = 0;
                    es_fmt.p_extra = NULL;
                }
            }
            else if( cp->extradata_size > 0 && !es_fmt.i_extra )
            {
                es_fmt.p_extra = malloc( i_extra );
                if( es_fmt.p_extra )
                {
                    es_fmt.i_extra = i_extra;
                    memcpy( es_fmt.p_extra, p_extra, i_extra );
                }
            }

            p_track->p_es = es_out_Add( p_demux->out, &es_fmt );
            if( p_track->p_es && (s->disposition & AV_DISPOSITION_DEFAULT) )
                es_out_Control( p_demux->out, ES_OUT_SET_ES_DEFAULT, p_track->p_es );

            msg_Dbg( p_demux, "adding es: %s codec = %4.4s (%d)", psz_type, (char*)&fcc, cp->codec_id );
        }
        es_format_Clean( &es_fmt );
    }

    if( p_sys->ic->start_time != (int64_t)AV_NOPTS_VALUE )
        i_start_time = FROM_AV_TS(p_sys->ic->start_time);

    msg_Dbg( p_demux, "AVFormat(%s %s) supported stream", AVPROVIDER(LIBAVFORMAT), LIBAVFORMAT_IDENT );
    msg_Dbg( p_demux, " - format = %s (%s)", p_sys->fmt->name, p_sys->fmt->long_name );
    msg_Dbg( p_demux, " - start time = %"PRId64, i_start_time );
    msg_Dbg( p_demux, " - duration = %"PRId64, ( p_sys->ic->duration != (int64_t)AV_NOPTS_VALUE ) ? FROM_AV_TS(p_sys->ic->duration) : -1 );

    /* Export chapters as a single title with one seekpoint per chapter. */
    if( p_sys->ic->nb_chapters > 0 )
    {
        p_sys->p_title = vlc_input_title_New();
        p_sys->p_title->i_length = FROM_AV_TS(p_sys->ic->duration);
    }

    for( unsigned i = 0; i < p_sys->ic->nb_chapters; i++ )
    {
        seekpoint_t *s = vlc_seekpoint_New();

        AVDictionaryEntry *title = av_dict_get( p_sys->ic->metadata, "title", NULL, 0);
        if( title && title->value )
        {
            s->psz_name = strdup( title->value );
            EnsureUTF8( s->psz_name );
            msg_Dbg( p_demux, " - chapter %d: %s", i, s->psz_name );
        }
        /* Chapter start relative to the container's start time. */
        s->i_time_offset = vlc_tick_from_samples( p_sys->ic->chapters[i]->start * p_sys->ic->chapters[i]->time_base.num, p_sys->ic->chapters[i]->time_base.den ) - (i_start_time != VLC_TICK_INVALID ? i_start_time : 0 );
        TAB_APPEND( p_sys->p_title->i_seekpoint, p_sys->p_title->seekpoint, s );
    }

    ResetTime( p_demux, 0 );
    return VLC_SUCCESS;
}
/*
 * Set up the libavformat "spdif" muxer used to pass compressed audio through.
 *
 * Creates an in-memory AVFormatContext whose AVIO writes go to write_packet(),
 * adds one stream carrying the passthrough codec id, derives the IEC 61937
 * frame parameters (channels / sample format / rate) per codec — with the
 * DTS-HD decision based on the probed profile — and writes the muxer header.
 *
 * Returns 0 on success; on failure uninit(da) tears everything down and -1
 * is returned.
 */
static int init_filter(struct dec_audio *da, AVPacket *pkt)
{
    struct spdifContext *spdif_ctx = da->priv;

    /* Profile matters only for DTS (core vs. DTS-HD framing). */
    int profile = FF_PROFILE_UNKNOWN;
    if (spdif_ctx->codec_id == AV_CODEC_ID_DTS)
        profile = determine_codec_profile(da, pkt);

    AVFormatContext *lavf_ctx = avformat_alloc_context();
    if (!lavf_ctx)
        goto fail;

    spdif_ctx->lavf_ctx = lavf_ctx;

    lavf_ctx->oformat = av_guess_format("spdif", NULL, NULL);
    if (!lavf_ctx->oformat)
        goto fail;

    void *buffer = av_mallocz(OUTBUF_SIZE);
    if (!buffer)
        abort();    /* OOM policy: abort */
    /* Write-only AVIO context; muxed bytes are delivered to write_packet(). */
    lavf_ctx->pb = avio_alloc_context(buffer, OUTBUF_SIZE, 1, spdif_ctx, NULL, write_packet, NULL);
    if (!lavf_ctx->pb) {
        av_free(buffer);
        goto fail;
    }

    // Request minimal buffering (not available on Libav)
#if LIBAVFORMAT_VERSION_MICRO >= 100
    lavf_ctx->pb->direct = 1;
#endif

    AVStream *stream = avformat_new_stream(lavf_ctx, 0);
    if (!stream)
        goto fail;

    stream->codecpar->codec_id = spdif_ctx->codec_id;

    AVDictionary *format_opts = NULL;

    /* Per-codec IEC 61937 transport parameters. */
    int num_channels = 0;
    int sample_format = 0;
    int samplerate = 0;
    switch (spdif_ctx->codec_id) {
    case AV_CODEC_ID_AAC:
        sample_format = AF_FORMAT_S_AAC;
        samplerate = 48000;
        num_channels = 2;
        break;
    case AV_CODEC_ID_AC3:
        sample_format = AF_FORMAT_S_AC3;
        samplerate = 48000;
        num_channels = 2;
        break;
    case AV_CODEC_ID_DTS: {
        /* Unknown profile is treated as HD-capable when use_dts_hd is set. */
        bool is_hd = profile == FF_PROFILE_DTS_HD_HRA || profile == FF_PROFILE_DTS_HD_MA || profile == FF_PROFILE_UNKNOWN;
        if (spdif_ctx->use_dts_hd && is_hd) {
            av_dict_set(&format_opts, "dtshd_rate", "768000", 0); // 4*192000
            sample_format = AF_FORMAT_S_DTSHD;
            samplerate = 192000;
            num_channels = 2*4;
        } else {
            sample_format = AF_FORMAT_S_DTS;
            samplerate = 48000;
            num_channels = 2;
        }
        break;
    }
    case AV_CODEC_ID_EAC3:
        sample_format = AF_FORMAT_S_EAC3;
        samplerate = 192000;
        num_channels = 2;
        break;
    case AV_CODEC_ID_MP3:
        sample_format = AF_FORMAT_S_MP3;
        samplerate = 48000;
        num_channels = 2;
        break;
    case AV_CODEC_ID_TRUEHD:
        sample_format = AF_FORMAT_S_TRUEHD;
        samplerate = 192000;
        num_channels = 8;
        break;
    default:
        abort();    /* caller guarantees a supported codec id */
    }
    mp_audio_set_num_channels(&spdif_ctx->fmt, num_channels);
    mp_audio_set_format(&spdif_ctx->fmt, sample_format);
    spdif_ctx->fmt.rate = samplerate;

    if (avformat_write_header(lavf_ctx, &format_opts) < 0) {
        MP_FATAL(da, "libavformat spdif initialization failed.\n");
        av_dict_free(&format_opts);
        goto fail;
    }
    av_dict_free(&format_opts);

    /* Header written: av_write_trailer must be called on close. */
    spdif_ctx->need_close = true;

    return 0;

fail:
    uninit(da);
    return -1;
}
/**
 * icecast_open: open a source-client connection to an Icecast server.
 *
 * Builds the Ice-* metadata headers, collects HTTP options (method,
 * auth, content type), extracts user:password credentials from the URI,
 * validates the mountpoint, and finally delegates to the http protocol
 * handler via ffurl_open().
 *
 * Returns 0 on success or a negative AVERROR code.  Write-only: read
 * access is rejected with AVERROR(ENOSYS).
 */
static int icecast_open(URLContext *h, const char *uri, int flags)
{
    IcecastContext *s = h->priv_data;

    // Dict to set options that we pass to the HTTP protocol
    AVDictionary *opt_dict = NULL;

    // URI part variables
    char h_url[1024], host[1024], auth[1024], path[1024];
    char *headers = NULL, *user = NULL;
    int port, ret;
    AVBPrint bp;

    /* Icecast is send-only; a mountpoint cannot be read through this
     * protocol handler. */
    if (flags & AVIO_FLAG_READ)
        return AVERROR(ENOSYS);

    av_bprint_init(&bp, 0, 1);

    // Build header strings
    cat_header(&bp, "Ice-Name", s->name);
    cat_header(&bp, "Ice-Description", s->description);
    cat_header(&bp, "Ice-URL", s->url);
    cat_header(&bp, "Ice-Genre", s->genre);
    cat_header(&bp, "Ice-Public", s->public ? "1" : "0");
    if (!av_bprint_is_complete(&bp)) {
        ret = AVERROR(ENOMEM);
        goto cleanup;
    }
    /* Fix: av_bprint_finalize() can itself fail (ENOMEM); previously its
     * return value was ignored and a NULL `headers` would be passed on. */
    if ((ret = av_bprint_finalize(&bp, &headers)) < 0)
        goto cleanup;

    // Set options
    av_dict_set(&opt_dict, "method", s->legacy_icecast ? "SOURCE" : "PUT", 0);
    av_dict_set(&opt_dict, "auth_type", "basic", 0);
    av_dict_set(&opt_dict, "headers", headers, 0);
    if (NOT_EMPTY(s->content_type))
        av_dict_set(&opt_dict, "content_type", s->content_type, 0);
    if (NOT_EMPTY(s->user_agent))
        av_dict_set(&opt_dict, "user_agent", s->user_agent, 0);

    // Parse URI
    av_url_split(NULL, 0, auth, sizeof(auth), host, sizeof(host),
                 &port, path, sizeof(path), uri);

    // Check for auth data in URI
    if (auth[0]) {
        char *sep = strchr(auth, ':');
        if (sep) {
            *sep = 0;
            sep++;
            /* URI credentials win over the -password option. */
            if (s->pass) {
                av_free(s->pass);
                av_log(h, AV_LOG_WARNING, "Overwriting -password <pass> with URI password!\n");
            }
            if (!(s->pass = av_strdup(sep))) {
                ret = AVERROR(ENOMEM);
                goto cleanup;
            }
        }
        if (!(user = av_strdup(auth))) {
            ret = AVERROR(ENOMEM);
            goto cleanup;
        }
    }

    // Build new authstring
    snprintf(auth, sizeof(auth), "%s:%s",
             user ? user : DEFAULT_ICE_USER,
             s->pass ? s->pass : "");

    // Check for mountpoint (path)
    if (!path[0] || strcmp(path, "/") == 0) {
        av_log(h, AV_LOG_ERROR, "No mountpoint (path) specified!\n");
        ret = AVERROR(EIO);
        goto cleanup;
    }

    // Build new URI for passing to http protocol
    ff_url_join(h_url, sizeof(h_url), "http", auth, host, port, "%s", path);
    // Finally open http proto handler
    ret = ffurl_open(&s->hd, h_url, AVIO_FLAG_READ_WRITE, NULL, &opt_dict);

cleanup:
    av_freep(&user);
    av_freep(&headers);
    av_dict_free(&opt_dict);
    return ret;
}
/**
 * ff_libnvenc_init: initialize the NVENC encoder wrapper from the
 * AVCodecContext settings and the user's x264-style option strings.
 *
 * Maps the generic lavc fields (dimensions, GOP, B-frames, rate
 * control, slices) onto nvenc_cfg, then builds an argv-style key/value
 * list out of the private options plus any ":"-separated x264_params /
 * x264_opts strings, and finally opens the encoder.
 *
 * Returns 0 on success, -1 on allocation or encoder-open failure, or
 * AVERROR(ENOMEM) for the coded_frame allocation.
 */
static av_cold int ff_libnvenc_init(AVCodecContext *avctx)
{
    NvEncContext *nvenc_ctx = (NvEncContext*)avctx->priv_data;
    int x264_argc;
    char **x264_argv;

    // Basic
    nvenc_ctx->nvenc_cfg.width        = avctx->width;
    nvenc_ctx->nvenc_cfg.height       = avctx->height;
    nvenc_ctx->nvenc_cfg.frameRateNum = avctx->time_base.den;
    nvenc_ctx->nvenc_cfg.frameRateDen = avctx->time_base.num * avctx->ticks_per_frame;

    // Codec
    if (avctx->profile >= 0)
        nvenc_ctx->nvenc_cfg.profile = avctx->profile;
    if (avctx->gop_size >= 0)
        nvenc_ctx->nvenc_cfg.gopLength = avctx->gop_size;
    else if (!(avctx->flags & CODEC_FLAG_CLOSED_GOP))
        nvenc_ctx->nvenc_cfg.gopLength = UINT_MAX; // infinite GOP
    if (avctx->max_b_frames >= 0)
        nvenc_ctx->nvenc_cfg.numBFrames = avctx->max_b_frames;
    if (avctx->refs >= 0)
        nvenc_ctx->nvenc_cfg.numRefFrames = avctx->refs;
    if (avctx->flags & CODEC_FLAG_INTERLACED_DCT)
        nvenc_ctx->nvenc_cfg.fieldMode = 2;

    // Rate-control: max-rate wins over average bit rate if both are set
    if (avctx->bit_rate > 0) {
        nvenc_ctx->nvenc_cfg.rateControl = 2;
        nvenc_ctx->nvenc_cfg.avgBitRate  = avctx->bit_rate;
    }
    if (avctx->rc_max_rate >= 0) {
        nvenc_ctx->nvenc_cfg.rateControl = 1;
        nvenc_ctx->nvenc_cfg.peakBitRate = avctx->rc_max_rate;
    }
    if (avctx->qmin >= 0)
        nvenc_ctx->nvenc_cfg.qpMin = avctx->qmin;
    if (avctx->qmax >= 0)
        nvenc_ctx->nvenc_cfg.qpMax = avctx->qmax;
    if (avctx->rc_buffer_size > 0) {
        nvenc_ctx->nvenc_cfg.vbvBufferSize = avctx->rc_buffer_size;
        if (avctx->rc_initial_buffer_occupancy >= 0) {
            nvenc_ctx->nvenc_cfg.vbvInitialDelay =
                avctx->rc_initial_buffer_occupancy / avctx->rc_buffer_size;
        }
    }

    // Codec-specific
    if (avctx->level >= 0)
        nvenc_ctx->nvenc_cfg.level = avctx->level;
    if (avctx->gop_size >= 0)
        nvenc_ctx->nvenc_cfg.idrPeriod = avctx->gop_size;
    if (avctx->slices > 0) {
        nvenc_ctx->nvenc_cfg.sliceMode     = 3;
        nvenc_ctx->nvenc_cfg.sliceModeData = avctx->slices;
    } else if (avctx->rtp_payload_size > 0) {
        nvenc_ctx->nvenc_cfg.sliceMode     = 1;
        nvenc_ctx->nvenc_cfg.sliceModeData = avctx->rtp_payload_size;
    }
    if (avctx->coder_type == FF_CODER_TYPE_AC)
        nvenc_ctx->nvenc_cfg.enableCABAC = 1;
    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER)
        nvenc_ctx->nvenc_cfg.enableRepeatSPSPPS = 1;

    // Allocate list of x264 options (255 slots, filled in key/value pairs)
    x264_argc = 0;
    x264_argv = av_calloc(255, sizeof(char*));
    if (!x264_argv)
        return -1;

    // ffmpeg-x264 parameters
    OPT_STRSTR("preset",        nvenc_ctx->preset);
    OPT_STRSTR("tune",          nvenc_ctx->tune);
    OPT_STRSTR("profile",       nvenc_ctx->profile);
    OPT_STRSTR("level",         nvenc_ctx->level);
    OPT_NUMSTR("qp",            nvenc_ctx->cqp);
    OPT_NUMSTR("intra-refresh", nvenc_ctx->intra_refresh);
    OPT_NUMSTR("aud",           nvenc_ctx->aud);
    OPT_STRSTR("deblock",       nvenc_ctx->deblock);
    OPT_NUMSTR("direct-pred",   nvenc_ctx->direct_pred);
    OPT_NUMSTR("nal_hrd",       nvenc_ctx->nal_hrd);
    OPT_NUMSTR("8x8dct",        nvenc_ctx->dct8x8);

    // x264-style extra parameters
    if (nvenc_ctx->x264_params) {
        AVDictionary *param_dict = NULL;
        AVDictionaryEntry *param_entry = NULL;

        if (!av_dict_parse_string(&param_dict, nvenc_ctx->x264_params, "=", ":", 0)) {
            /* Bound the writes so an over-long option string cannot
             * overflow the 255-entry argv array (2 slots per option). */
            while (x264_argc + 2 <= 255 &&
                   (param_entry = av_dict_get(param_dict, "", param_entry, AV_DICT_IGNORE_SUFFIX))) {
                x264_argv[x264_argc++] = av_strdup(param_entry->key);
                x264_argv[x264_argc++] = av_strdup(param_entry->value);
            }
            av_dict_free(&param_dict);
        }
    }

    // x264-style extra options
    if (nvenc_ctx->x264_opts) {
        AVDictionary *param_dict = NULL;
        AVDictionaryEntry *param_entry = NULL;

        if (!av_dict_parse_string(&param_dict, nvenc_ctx->x264_opts, "=", ":", 0)) {
            while (x264_argc + 2 <= 255 &&
                   (param_entry = av_dict_get(param_dict, "", param_entry, AV_DICT_IGNORE_SUFFIX))) {
                x264_argv[x264_argc++] = av_strdup(param_entry->key);
                x264_argv[x264_argc++] = av_strdup(param_entry->value);
            }
            av_dict_free(&param_dict);
        }
    }

    // Notify encoder to use the list of x264 options
    nvenc_ctx->nvenc_cfg.x264_paramc = x264_argc;
    nvenc_ctx->nvenc_cfg.x264_paramv = x264_argv;

    // Create and initialize nvencoder
    nvenc_ctx->nvenc = nvenc_open(&nvenc_ctx->nvenc_cfg);
    if (!nvenc_ctx->nvenc)
        return -1;

    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame)
        return AVERROR(ENOMEM);

    /* Report back what the encoder will actually produce. */
    avctx->has_b_frames = (nvenc_ctx->nvenc_cfg.numBFrames > 0) ? 1 : 0;
    if (avctx->max_b_frames < 0)
        avctx->max_b_frames = 0;
    avctx->bit_rate = nvenc_ctx->nvenc_cfg.avgBitRate;

    return 0;
}
static void init_fps(int bf, int audio_preroll, int fps) { AVStream *st; ctx = avformat_alloc_context(); if (!ctx) exit(1); ctx->oformat = av_guess_format(format, NULL, NULL); if (!ctx->oformat) exit(1); ctx->pb = avio_alloc_context(iobuf, sizeof(iobuf), AVIO_FLAG_WRITE, NULL, NULL, io_write, NULL); if (!ctx->pb) exit(1); ctx->flags |= AVFMT_FLAG_BITEXACT; st = avformat_new_stream(ctx, NULL); if (!st) exit(1); st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = AV_CODEC_ID_H264; st->codec->width = 640; st->codec->height = 480; st->time_base.num = 1; st->time_base.den = 30; st->codec->extradata_size = sizeof(h264_extradata); st->codec->extradata = av_mallocz(st->codec->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) exit(1); memcpy(st->codec->extradata, h264_extradata, sizeof(h264_extradata)); st->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; video_st = st; st = avformat_new_stream(ctx, NULL); if (!st) exit(1); st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = AV_CODEC_ID_AAC; st->codec->sample_rate = 44100; st->codec->channels = 2; st->time_base.num = 1; st->time_base.den = 44100; st->codec->extradata_size = sizeof(aac_extradata); st->codec->extradata = av_mallocz(st->codec->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) exit(1); memcpy(st->codec->extradata, aac_extradata, sizeof(aac_extradata)); st->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; audio_st = st; if (avformat_write_header(ctx, &opts) < 0) exit(1); av_dict_free(&opts); frames = 0; gop_size = 30; duration = video_st->time_base.den / fps; audio_duration = 1024 * audio_st->time_base.den / audio_st->codec->sample_rate; if (audio_preroll) audio_preroll = 2048 * audio_st->time_base.den / audio_st->codec->sample_rate; bframes = bf; video_dts = bframes ? -duration : 0; audio_dts = -audio_preroll; }
VideoEncoder::VideoEncoder(Muxer* muxer, const QString& codec_name, const std::vector<std::pair<QString, QString> >& codec_options, unsigned int bit_rate, unsigned int width, unsigned int height, unsigned int frame_rate) : BaseEncoder(muxer) { try { m_bit_rate = bit_rate; m_width = width; m_height = height; m_frame_rate = frame_rate; m_opt_threads = std::max(1u, std::thread::hardware_concurrency()); m_opt_minrate = (unsigned int) -1; m_opt_maxrate = (unsigned int) -1; m_opt_bufsize = (unsigned int) -1; #if !SSR_USE_AVCODEC_PRIVATE_CRF m_opt_crf = (unsigned int) -1; #endif #if !SSR_USE_AVCODEC_PRIVATE_PRESET m_opt_preset = ""; #endif if(m_width == 0 || m_height == 0) { Logger::LogError("[VideoEncoder::Init] " + QObject::tr("Error: Width or height is zero!")); throw LibavException(); } if(m_width > 10000 || m_height > 10000) { Logger::LogError("[VideoEncoder::Init] " + QObject::tr("Error: Width or height is too large, the maximum width and height is %1!").arg(10000)); throw LibavException(); } if(m_width % 2 != 0 || m_height % 2 != 0) { Logger::LogError("[VideoEncoder::Init] " + QObject::tr("Error: Width or height is not an even number!")); throw LibavException(); } if(m_frame_rate == 0) { Logger::LogError("[VideoEncoder::Init] " + QObject::tr("Error: Frame rate it zero!")); throw LibavException(); } // start the encoder AVDictionary *options = NULL; try { for(unsigned int i = 0; i < codec_options.size(); ++i) { if(codec_options[i].first == "threads") m_opt_threads = codec_options[i].second.toUInt(); else if(codec_options[i].first == "minrate") m_opt_minrate = codec_options[i].second.toUInt() * 1024; // kbps else if(codec_options[i].first == "maxrate") m_opt_maxrate = codec_options[i].second.toUInt() * 1024; // kbps else if(codec_options[i].first == "bufsize") m_opt_bufsize = codec_options[i].second.toUInt() * 1024; // kbit #if !SSR_USE_AVCODEC_PRIVATE_PRESET else if(codec_options[i].first == "crf") m_opt_crf = codec_options[i].second.toUInt(); #endif #if 
!SSR_USE_AVCODEC_PRIVATE_PRESET else if(codec_options[i].first == "preset") m_opt_preset = codec_options[i].second; #endif else av_dict_set(&options, codec_options[i].first.toAscii().constData(), codec_options[i].second.toAscii().constData(), 0); } CreateCodec(codec_name, &options); av_dict_free(&options); } catch(...) { av_dict_free(&options); throw; } #if !SSR_USE_AVCODEC_ENCODE_VIDEO2 // allocate a temporary buffer // Apparently libav/ffmpeg completely ignores the size of the buffer, and if it's too small it just crashes. // Originally it was 256k, which is large enough for about 99.9% of the packets, but it still occasionally crashes. // So now I'm using a buffer that's always at least large enough to hold a 256k header and *two* completely uncompressed frames. // (one YUV frame takes w * h * 1.5 bytes) // Newer versions of libav/ffmpeg have deprecated avcodec_encode_video and added a new function which does the allocation // automatically, just like avcodec_encode_audio2, but that function isn't available in Ubuntu 12.04/12.10 yet. m_temp_buffer.resize(std::max<unsigned int>(FF_MIN_BUFFER_SIZE, 256 * 1024 + m_width * m_height * 3)); #endif GetMuxer()->RegisterEncoder(GetStreamIndex(), this); } catch(...) { Destruct(); throw; } }
/**
 * tryOpen: open the local file with libavformat, locate the first audio
 * stream, open its decoder, and configure the resampler plus the
 * channel count / sample rate / frame count reported to Mixxx.
 *
 * Returns SUCCEEDED on success, UNSUPPORTED_FORMAT when no decoder
 * exists for the stream's codec, and FAILED for every other error.
 *
 * NOTE(review): on the error paths after avformat_open_input() succeeds
 * the context is not closed here - presumably the destructor or caller
 * cleans up m_pFormatCtx; verify against the class's close logic.
 */
SoundSource::OpenResult SoundSourceFFmpeg::tryOpen(const AudioSourceConfig& /*audioSrcCfg*/) {
    AVDictionary *l_iFormatOpts = nullptr;

    const QString localFileName(getLocalFileName());
    qDebug() << "New SoundSourceFFmpeg :" << localFileName;

    DEBUG_ASSERT(!m_pFormatCtx);
    m_pFormatCtx = avformat_alloc_context();

    if (m_pFormatCtx == nullptr) {
        qDebug() << "SoundSourceFFmpeg::tryOpen: Can't allocate memory";
        return OpenResult::FAILED;
    }

    // TODO() why is this required, should't it be a runtime check
#if LIBAVCODEC_VERSION_INT < 3622144 // 55.69.0
    m_pFormatCtx->max_analyze_duration = 999999999;
#endif

    // libav replaces open() with ff_win32_open() which accepts a
    // Utf8 path
    // see: avformat/os_support.h
    // The old method defining an URL_PROTOCOL is deprecated
#if defined(_WIN32) && !defined(__MINGW32CE__)
    const QByteArray qBAFilename(
            avformat_version() >= ((52<<16)+(0<<8)+0) ?
            getLocalFileName().toUtf8() :
            getLocalFileName().toLocal8Bit());
#else
    const QByteArray qBAFilename(getLocalFileName().toLocal8Bit());
#endif

    // Open file and make m_pFormatCtx
    if (avformat_open_input(&m_pFormatCtx, qBAFilename.constData(), nullptr,
                            &l_iFormatOpts) != 0) {
        qDebug() << "SoundSourceFFmpeg::tryOpen: cannot open" << localFileName;
        return OpenResult::FAILED;
    }

    // TODO() why is this required, should't it be a runtime check
#if LIBAVCODEC_VERSION_INT > 3544932 // 54.23.100
    av_dict_free(&l_iFormatOpts);
#endif

    // Retrieve stream information
    if (avformat_find_stream_info(m_pFormatCtx, nullptr) < 0) {
        qDebug() << "SoundSourceFFmpeg::tryOpen: cannot open" << localFileName;
        return OpenResult::FAILED;
    }

    //debug only (Enable if needed)
    //av_dump_format(m_pFormatCtx, 0, qBAFilename.constData(), false);

    // Find the first audio stream
    m_iAudioStream = -1;

    for (unsigned int i = 0; i < m_pFormatCtx->nb_streams; i++)
        if (m_pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            m_iAudioStream = i;
            break;
        }
    if (m_iAudioStream == -1) {
        qDebug() << "SoundSourceFFmpeg::tryOpen: cannot find an audio stream: cannot open"
                 << localFileName;
        return OpenResult::FAILED;
    }

    // Get a pointer to the codec context for the audio stream
    m_pCodecCtx = m_pFormatCtx->streams[m_iAudioStream]->codec;

    // Find the decoder for the audio stream
    if (!(m_pCodec = avcodec_find_decoder(m_pCodecCtx->codec_id))) {
        qDebug() << "SoundSourceFFmpeg::tryOpen: cannot find a decoder for" << localFileName;
        return OpenResult::UNSUPPORTED_FORMAT;
    }

    if (avcodec_open2(m_pCodecCtx, m_pCodec, nullptr)<0) {
        qDebug() << "SoundSourceFFmpeg::tryOpen: cannot open" << localFileName;
        return OpenResult::FAILED;
    }

    // Decoded audio is converted to interleaved float for Mixxx.
    m_pResample = std::make_unique<EncoderFfmpegResample>(m_pCodecCtx);
    m_pResample->openMixxx(m_pCodecCtx->sample_fmt, AV_SAMPLE_FMT_FLT);

    setChannelCount(m_pCodecCtx->channels);
    setSamplingRate(m_pCodecCtx->sample_rate);
    // Estimate total frames from the container duration (AV_TIME_BASE
    // units) scaled to the stream's sample rate.
    setFrameCount((qint64)round((double)((double)m_pFormatCtx->duration *
                                         (double)m_pCodecCtx->sample_rate) / (double)AV_TIME_BASE));

    qDebug() << "SoundSourceFFmpeg::tryOpen: Sampling rate: " << getSamplingRate() <<
            ", Channels: " << getChannelCount() << "\n";
    if (getChannelCount() > 2) {
        qDebug() << "ffmpeg: No support for more than 2 channels!";
        return OpenResult::FAILED;
    }

    return OpenResult::SUCCEEDED;
}
CameraDevice* CameraDevice::open(QString devName, VideoMode mode) { if (!getDefaultInputFormat()) return nullptr; if (devName == "none") { qDebug() << "Tried to open the null device"; return nullptr; } AVDictionary* options = nullptr; if (!iformat); #ifdef Q_OS_LINUX else if (devName.startsWith("x11grab#")) { QSize screen; if (mode.width && mode.height) { screen.setWidth(mode.width); screen.setHeight(mode.height); } else { screen = QApplication::desktop()->screenGeometry().size(); // Workaround https://trac.ffmpeg.org/ticket/4574 by choping 1 px bottom and right screen.setWidth(screen.width()-1); screen.setHeight(screen.height()-1); } av_dict_set(&options, "video_size", QString("%1x%2").arg(screen.width()).arg(screen.height()).toStdString().c_str(), 0); if (mode.FPS) av_dict_set(&options, "framerate", QString().setNum(mode.FPS).toStdString().c_str(), 0); else av_dict_set(&options, "framerate", QString().setNum(5).toStdString().c_str(), 0); } #endif #ifdef Q_OS_WIN else if (devName.startsWith("gdigrab#")) { av_dict_set(&options, "framerate", QString().setNum(5).toStdString().c_str(), 0); } #endif #ifdef Q_OS_WIN else if (iformat->name == QString("dshow") && mode) { av_dict_set(&options, "video_size", QString("%1x%2").arg(mode.width).arg(mode.height).toStdString().c_str(), 0); av_dict_set(&options, "framerate", QString().setNum(mode.FPS).toStdString().c_str(), 0); } #endif #ifdef Q_OS_LINUX else if (iformat->name == QString("video4linux2,v4l2") && mode) { av_dict_set(&options, "video_size", QString("%1x%2").arg(mode.width).arg(mode.height).toStdString().c_str(), 0); av_dict_set(&options, "framerate", QString().setNum(mode.FPS).toStdString().c_str(), 0); } #endif else if (mode) { qWarning() << "Video mode-setting not implemented for input "<<iformat->name; (void)mode; } CameraDevice* dev = open(devName, &options); if (options) av_dict_free(&options); return dev; }
/**
 * encavcodecaInit: initialize a libav audio encoder work object.
 *
 * Picks the encoder (by name or by codec id) and its sample format /
 * profile / channel layout from the job's output audio settings, opens
 * the codec, and sets up an optional avresample context to convert from
 * HandBrake's interleaved float input to the encoder's sample format.
 *
 * Returns 0 on success, 1 on any failure.  Fix over the original: the
 * av_opts dictionary is now freed on every error path instead of
 * leaking when the encoder lookup or open fails.
 */
static int encavcodecaInit(hb_work_object_t *w, hb_job_t *job)
{
    AVCodec *codec;
    AVCodecContext *context;
    hb_audio_t *audio = w->audio;

    hb_work_private_t *pv = calloc(1, sizeof(hb_work_private_t));
    w->private_data       = pv;
    pv->job               = job;
    pv->list              = hb_list_init();

    // channel count, layout and matrix encoding
    int matrix_encoding;
    uint64_t channel_layout   = hb_ff_mixdown_xlat(audio->config.out.mixdown,
                                                   &matrix_encoding);
    pv->out_discrete_channels =
        hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown);

    // default settings and options
    AVDictionary *av_opts          = NULL;
    const char *codec_name         = NULL;
    enum AVCodecID codec_id        = AV_CODEC_ID_NONE;
    enum AVSampleFormat sample_fmt = AV_SAMPLE_FMT_FLTP;
    int bits_per_raw_sample        = 0;
    int profile                    = FF_PROFILE_UNKNOWN;

    // override with encoder-specific values
    switch (audio->config.out.codec)
    {
        case HB_ACODEC_AC3:
            codec_id = AV_CODEC_ID_AC3;
            if (matrix_encoding != AV_MATRIX_ENCODING_NONE)
                av_dict_set(&av_opts, "dsur_mode", "on", 0);
            break;

        case HB_ACODEC_FDK_AAC:
        case HB_ACODEC_FDK_HAAC:
            codec_name          = "libfdk_aac";
            sample_fmt          = AV_SAMPLE_FMT_S16;
            bits_per_raw_sample = 16;
            switch (audio->config.out.codec)
            {
                case HB_ACODEC_FDK_HAAC:
                    profile = FF_PROFILE_AAC_HE;
                    break;
                default:
                    profile = FF_PROFILE_AAC_LOW;
                    break;
            }
            // Libav's libfdk-aac wrapper expects back channels for 5.1
            // audio, and will error out unless we translate the layout
            if (channel_layout == AV_CH_LAYOUT_5POINT1)
                channel_layout  = AV_CH_LAYOUT_5POINT1_BACK;
            break;

        case HB_ACODEC_FFAAC:
            codec_name = "aac";
            av_dict_set(&av_opts, "stereo_mode", "ms_off", 0);
            break;

        case HB_ACODEC_FFFLAC:
        case HB_ACODEC_FFFLAC24:
            codec_id = AV_CODEC_ID_FLAC;
            switch (audio->config.out.codec)
            {
                case HB_ACODEC_FFFLAC24:
                    sample_fmt          = AV_SAMPLE_FMT_S32;
                    bits_per_raw_sample = 24;
                    break;
                default:
                    sample_fmt          = AV_SAMPLE_FMT_S16;
                    bits_per_raw_sample = 16;
                    break;
            }
            break;

        default:
            hb_error("encavcodecaInit: unsupported codec (0x%x)",
                     audio->config.out.codec);
            return 1;
    }
    if (codec_name != NULL)
    {
        codec = avcodec_find_encoder_by_name(codec_name);
        if (codec == NULL)
        {
            hb_error("encavcodecaInit: avcodec_find_encoder_by_name(%s) failed",
                     codec_name);
            av_dict_free(&av_opts); // fix: don't leak the options dict
            return 1;
        }
    }
    else
    {
        codec = avcodec_find_encoder(codec_id);
        if (codec == NULL)
        {
            hb_error("encavcodecaInit: avcodec_find_encoder(%d) failed",
                     codec_id);
            av_dict_free(&av_opts); // fix: don't leak the options dict
            return 1;
        }
    }
    // allocate the context and apply the settings
    context                      = avcodec_alloc_context3(codec);
    hb_ff_set_sample_fmt(context, codec, sample_fmt);
    context->bits_per_raw_sample = bits_per_raw_sample;
    context->profile             = profile;
    context->channel_layout      = channel_layout;
    context->channels            = pv->out_discrete_channels;
    context->sample_rate         = audio->config.out.samplerate;
    if (audio->config.out.bitrate > 0)
    {
        context->bit_rate = audio->config.out.bitrate * 1000;
    }
    else if (audio->config.out.quality >= 0)
    {
        context->global_quality = audio->config.out.quality * FF_QP2LAMBDA;
        context->flags |= CODEC_FLAG_QSCALE;
    }
    if (audio->config.out.compression_level >= 0)
    {
        context->compression_level = audio->config.out.compression_level;
    }
    // For some codecs, libav requires the following flag to be set
    // so that it fills extradata with global header information.
    // If this flag is not set, it inserts the data into each
    // packet instead.
    context->flags |= CODEC_FLAG_GLOBAL_HEADER;
    if (hb_avcodec_open(context, codec, &av_opts, 0))
    {
        hb_error("encavcodecaInit: hb_avcodec_open() failed");
        av_dict_free(&av_opts); // fix: free leftover options on failure
        return 1;
    }
    // avcodec_open populates the opts dictionary with the
    // things it didn't recognize.
    AVDictionaryEntry *t = NULL;
    while ((t = av_dict_get(av_opts, "", t, AV_DICT_IGNORE_SUFFIX)))
    {
        hb_log("encavcodecaInit: Unknown avcodec option %s", t->key);
    }
    av_dict_free(&av_opts);

    pv->context = context;
    audio->config.out.samples_per_frame =
    pv->samples_per_frame               = context->frame_size;
    pv->input_samples                   = context->frame_size * context->channels;
    pv->input_buf                       = malloc(pv->input_samples * sizeof(float));
    // Some encoders in libav (e.g. fdk-aac) fail if the output buffer
    // size is not some minumum value. 8K seems to be enough :(
    pv->max_output_bytes                = MAX(FF_MIN_BUFFER_SIZE,
                                              (pv->input_samples *
                                               av_get_bytes_per_sample(context->sample_fmt)));

    // sample_fmt conversion
    if (context->sample_fmt != AV_SAMPLE_FMT_FLT)
    {
        pv->output_buf = malloc(pv->max_output_bytes);
        pv->avresample = avresample_alloc_context();
        if (pv->avresample == NULL)
        {
            hb_error("encavcodecaInit: avresample_alloc_context() failed");
            return 1;
        }
        av_opt_set_int(pv->avresample, "in_sample_fmt",
                       AV_SAMPLE_FMT_FLT, 0);
        av_opt_set_int(pv->avresample, "out_sample_fmt",
                       context->sample_fmt, 0);
        av_opt_set_int(pv->avresample, "in_channel_layout",
                       context->channel_layout, 0);
        av_opt_set_int(pv->avresample, "out_channel_layout",
                       context->channel_layout, 0);
        if (hb_audio_dither_is_supported(audio->config.out.codec))
        {
            // dithering needs the sample rate
            av_opt_set_int(pv->avresample, "in_sample_rate",
                           context->sample_rate, 0);
            av_opt_set_int(pv->avresample, "out_sample_rate",
                           context->sample_rate, 0);
            av_opt_set_int(pv->avresample, "dither_method",
                           audio->config.out.dither_method, 0);
        }
        if (avresample_open(pv->avresample))
        {
            hb_error("encavcodecaInit: avresample_open() failed");
            avresample_free(&pv->avresample);
            return 1;
        }
    }
    else
    {
        // input is already float; encode straight from the input buffer
        pv->avresample = NULL;
        pv->output_buf = pv->input_buf;
    }

    // export the codec's global header (if any) to the muxer config
    if (context->extradata != NULL)
    {
        memcpy(w->config->extradata.bytes, context->extradata,
               context->extradata_size);
        w->config->extradata.length = context->extradata_size;
    }

    // encoder delay converted to the 90 kHz mux clock
    audio->config.out.delay = av_rescale_q(context->delay, context->time_base,
                                           (AVRational){1, 90000});

    return 0;
}
/**
 * libx265_encode_init: configure and open an x265 (HEVC) encoder from
 * the AVCodecContext settings.
 *
 * Applies preset/tune, frame geometry and rate, SAR, chroma subsampling,
 * CRF or ABR rate control, and any ":"-separated x265-opts key=value
 * pairs, then opens the encoder and (when global headers are requested)
 * exports the VPS/SPS/PPS into avctx->extradata.
 *
 * Returns 0 on success or a negative AVERROR code.  Later failure paths
 * call libx265_encode_close() to release the encoder/params; earlier
 * returns rely on the caller's cleanup convention for this codec.
 */
static av_cold int libx265_encode_init(AVCodecContext *avctx)
{
    libx265Context *ctx = avctx->priv_data;

    /* 4:2:2/4:4:4 output is only allowed with -strict experimental. */
    if (avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL &&
        !av_pix_fmt_desc_get(avctx->pix_fmt)->log2_chroma_w) {
        av_log(avctx, AV_LOG_ERROR,
               "4:2:2 and 4:4:4 support is not fully defined for HEVC yet. "
               "Set -strict experimental to encode anyway.\n");
        return AVERROR(ENOSYS);
    }

    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
        return AVERROR(ENOMEM);
    }

    ctx->params = x265_param_alloc();
    if (!ctx->params) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate x265 param structure.\n");
        return AVERROR(ENOMEM);
    }

    if (x265_param_default_preset(ctx->params, ctx->preset, ctx->tune) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid preset or tune.\n");
        return AVERROR(EINVAL);
    }

    ctx->params->frameNumThreads = avctx->thread_count;
    ctx->params->fpsNum          = avctx->time_base.den;
    ctx->params->fpsDenom        = avctx->time_base.num * avctx->ticks_per_frame;
    ctx->params->sourceWidth     = avctx->width;
    ctx->params->sourceHeight    = avctx->height;
    ctx->params->bEnablePsnr     = !!(avctx->flags & CODEC_FLAG_PSNR);

    /* Pass the sample aspect ratio through as a reduced "num:den" string. */
    if (avctx->sample_aspect_ratio.num > 0 && avctx->sample_aspect_ratio.den > 0) {
        char sar[12];
        int sar_num, sar_den;

        av_reduce(&sar_num, &sar_den,
                  avctx->sample_aspect_ratio.num,
                  avctx->sample_aspect_ratio.den, 65535);
        snprintf(sar, sizeof(sar), "%d:%d", sar_num, sar_den);
        if (x265_param_parse(ctx->params, "sar", sar) == X265_PARAM_BAD_VALUE) {
            av_log(avctx, AV_LOG_ERROR, "Invalid SAR: %d:%d.\n", sar_num, sar_den);
            return AVERROR_INVALIDDATA;
        }
    }

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV420P10:
        ctx->params->internalCsp = X265_CSP_I420;
        break;
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV422P10:
        ctx->params->internalCsp = X265_CSP_I422;
        break;
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUV444P10:
        ctx->params->internalCsp = X265_CSP_I444;
        break;
    }

    /* CRF takes priority over an explicit bit rate. */
    if (ctx->crf >= 0) {
        char crf[6];

        snprintf(crf, sizeof(crf), "%2.2f", ctx->crf);
        if (x265_param_parse(ctx->params, "crf", crf) == X265_PARAM_BAD_VALUE) {
            av_log(avctx, AV_LOG_ERROR, "Invalid crf: %2.2f.\n", ctx->crf);
            return AVERROR(EINVAL);
        }
    } else if (avctx->bit_rate > 0) {
        ctx->params->rc.bitrate         = avctx->bit_rate / 1000;
        ctx->params->rc.rateControlMode = X265_RC_ABR;
    }

    /* Without global headers the parameter sets must be repeated in-band. */
    if (!(avctx->flags & CODEC_FLAG_GLOBAL_HEADER))
        ctx->params->bRepeatHeaders = 1;

    /* User-supplied "key=value:key=value" extra options. */
    if (ctx->x265_opts) {
        AVDictionary *dict    = NULL;
        AVDictionaryEntry *en = NULL;

        if (!av_dict_parse_string(&dict, ctx->x265_opts, "=", ":", 0)) {
            while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
                int parse_ret = x265_param_parse(ctx->params, en->key, en->value);

                switch (parse_ret) {
                case X265_PARAM_BAD_NAME:
                    av_log(avctx, AV_LOG_WARNING,
                          "Unknown option: %s.\n", en->key);
                    break;
                case X265_PARAM_BAD_VALUE:
                    av_log(avctx, AV_LOG_WARNING,
                          "Invalid value for %s: %s.\n", en->key, en->value);
                    break;
                default:
                    break;
                }
            }
            av_dict_free(&dict);
        }
    }

    ctx->encoder = x265_encoder_open(ctx->params);
    if (!ctx->encoder) {
        av_log(avctx, AV_LOG_ERROR, "Cannot open libx265 encoder.\n");
        libx265_encode_close(avctx);
        return AVERROR_INVALIDDATA;
    }

    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
        x265_nal *nal;
        int nnal;

        avctx->extradata_size = x265_encoder_headers(ctx->encoder, &nal, &nnal);
        if (avctx->extradata_size <= 0) {
            av_log(avctx, AV_LOG_ERROR, "Cannot encode headers.\n");
            libx265_encode_close(avctx);
            return AVERROR_INVALIDDATA;
        }

        avctx->extradata = av_malloc(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!avctx->extradata) {
            av_log(avctx, AV_LOG_ERROR,
                   "Cannot allocate HEVC header of size %d.\n", avctx->extradata_size);
            libx265_encode_close(avctx);
            return AVERROR(ENOMEM);
        }
        /* x265 returns the headers as one contiguous payload at nal[0]. */
        memcpy(avctx->extradata, nal[0].payload, avctx->extradata_size);
    }

    return 0;
}
void av_metadata_free(AVDictionary **pm) { av_dict_free(pm); }
// open video capture device
//
// Resolves the platform capture input format (vfwcap on Windows,
// v4l/dv1394 on Linux), derives the device filename from camIdx or an
// explicit "/dev/..." path, fills the avformat options dictionary
// (standard, framerate, video_size), and opens the stream via
// openStream().  Returns silently on any failure.
//
// NOTE(review): the early `return`s after formatParams has been
// populated (unsupported input format, openStream failure) skip the
// av_dict_free() at the end - looks like a small dictionary leak on
// those paths; confirm before relying on it.
void VideoFFmpeg::openCam (char *file, short camIdx)
{
	// open camera source
	AVInputFormat		*inputFormat;
	AVDictionary		*formatParams = NULL;
	char				filename[28], rateStr[20];

#ifdef WIN32
	// video capture on windows only through Video For Windows driver
	inputFormat = av_find_input_format("vfwcap");
	if (!inputFormat)
		// Video For Windows not supported??
		return;
	sprintf(filename, "%d", camIdx);
#else
	// In Linux we support two types of devices: VideoForLinux and DV1394.
	// the user specify it with the filename:
	// [<device_type>][:<standard>]
	// <device_type> : 'v4l' for VideoForLinux, 'dv1394' for DV1394. By default 'v4l'
	// <standard>    : 'pal', 'secam' or 'ntsc'. By default 'ntsc'
	// The driver name is constructed automatically from the device type:
	// v4l   : /dev/video<camIdx>
	// dv1394: /dev/dv1394/<camIdx>
	// If you have different driver name, you can specify the driver name explicitly
	// instead of device type. Examples of valid filename:
	//    /dev/v4l/video0:pal
	//    /dev/ieee1394/1:ntsc
	//    dv1394:secam
	//    v4l:pal
	char *p;

	if (file && strstr(file, "1394") != NULL)
	{
		// the user specifies a driver, check if it is v4l or d41394
		inputFormat = av_find_input_format("dv1394");
		sprintf(filename, "/dev/dv1394/%d", camIdx);
	}
	else
	{
		// try the v4l input format names from newest to oldest
		const char *formats[] = {"video4linux2,v4l2", "video4linux2", "video4linux"};
		int i, formatsCount = sizeof(formats) / sizeof(char*);
		for (i = 0; i < formatsCount; i++) {
			inputFormat = av_find_input_format(formats[i]);
			if (inputFormat)
				break;
		}
		sprintf(filename, "/dev/video%d", camIdx);
	}
	if (!inputFormat)
		// these format should be supported, check ffmpeg compilation
		return;
	if (file && strncmp(file, "/dev", 4) == 0)
	{
		// user does not specify a driver
		// (an explicit /dev/... path overrides the generated filename;
		// any ":<standard>" suffix is stripped here)
		strncpy(filename, file, sizeof(filename));
		filename[sizeof(filename)-1] = 0;
		if ((p = strchr(filename, ':')) != 0)
			*p = 0;
	}
	// the text after ':' selects the video standard (pal/secam/ntsc)
	if (file && (p = strchr(file, ':')) != NULL) {
		av_dict_set(&formatParams, "standard", p+1, 0);
	}
#endif
	//frame rate
	if (m_captRate <= 0.f)
		m_captRate = defFrameRate;
	sprintf(rateStr, "%f", m_captRate);

	av_dict_set(&formatParams, "framerate", rateStr, 0);

	if (m_captWidth > 0 && m_captHeight > 0) {
		char video_size[64];
		BLI_snprintf(video_size, sizeof(video_size), "%dx%d", m_captWidth, m_captHeight);
		av_dict_set(&formatParams, "video_size", video_size, 0);
	}

	if (openStream(filename, inputFormat, &formatParams) != 0)
		return;

	// for video capture it is important to do non blocking read
	m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK;
	// open base class
	VideoBase::openCam(file, camIdx);
	// check if we should do multi-threading?
	if (BLI_system_thread_count() > 1)
	{
		// no need to thread if the system has a single core
		m_isThreaded =  true;
	}

	av_dict_free(&formatParams);
}
/* Open one slave output of the tee muxer.
 * `slave` is the user-supplied "[opts]filename" spec; on success the created
 * muxer context, stream map and per-stream bitstream filters are stored into
 * *tee_slave.  Returns 0 or a negative AVERROR code; on failure the `end:`
 * label releases everything allocated so far. */
static int open_slave(AVFormatContext *avf, char *slave, TeeSlave *tee_slave)
{
    int i, ret;
    AVDictionary *options = NULL;
    AVDictionaryEntry *entry;
    char *filename;
    char *format = NULL, *select = NULL, *on_fail = NULL;
    char *use_fifo = NULL, *fifo_options_str = NULL;
    AVFormatContext *avf2 = NULL;
    AVStream *st, *st2;
    int stream_count;
    int fullret;
    char *subselect = NULL, *next_subselect = NULL, *first_subselect = NULL, *tmp_select = NULL;

    /* split the spec into an options dictionary and the slave filename */
    if ((ret = ff_tee_parse_slave_options(avf, slave, &options, &filename)) < 0)
        return ret;

    /* Move an option's value out of `options` into `field`; the caller of the
     * macro becomes the owner of the string (freed at `end:`). */
#define STEAL_OPTION(option, field) do {                                \
        if ((entry = av_dict_get(options, option, NULL, 0))) {          \
            field = entry->value;                                       \
            entry->value = NULL; /* prevent it from being freed */      \
            av_dict_set(&options, option, NULL, 0);                     \
        }                                                               \
    } while (0)

    STEAL_OPTION("f", format);
    STEAL_OPTION("select", select);
    STEAL_OPTION("onfail", on_fail);
    STEAL_OPTION("use_fifo", use_fifo);
    STEAL_OPTION("fifo_options", fifo_options_str);

    ret = parse_slave_failure_policy_option(on_fail, tee_slave);
    if (ret < 0) {
        av_log(avf, AV_LOG_ERROR,
               "Invalid onfail option value, valid options are 'abort' and 'ignore'\n");
        goto end;
    }

    ret = parse_slave_fifo_options(use_fifo, fifo_options_str, tee_slave);
    if (ret < 0) {
        av_log(avf, AV_LOG_ERROR, "Error parsing fifo options: %s\n", av_err2str(ret));
        goto end;
    }

    if (tee_slave->use_fifo) {
        /* When routing through the fifo muxer, the remaining slave options are
         * serialized and handed to the fifo muxer as "format_options". */
        if (options) {
            char *format_options_str = NULL;
            ret = av_dict_get_string(options, &format_options_str, '=', ':');
            if (ret < 0)
                goto end;

            ret = av_dict_set(&tee_slave->fifo_options, "format_options", format_options_str,
                              AV_DICT_DONT_STRDUP_VAL);
            if (ret < 0)
                goto end;
        }

        if (format) {
            ret = av_dict_set(&tee_slave->fifo_options, "fifo_format", format,
                              AV_DICT_DONT_STRDUP_VAL);
            format = NULL; /* ownership moved into the dictionary */
            if (ret < 0)
                goto end;
        }

        av_dict_free(&options);
        /* NOTE(review): `options` now aliases tee_slave->fifo_options, and the
         * `end:` label frees `options` — leaving tee_slave->fifo_options
         * dangling. Verify against the caller's use of fifo_options. */
        options = tee_slave->fifo_options;
    }

    ret = avformat_alloc_output_context2(&avf2, NULL,
                                         tee_slave->use_fifo ? "fifo" : format, filename);
    if (ret < 0)
        goto end;
    tee_slave->avf = avf2;

    /* inherit metadata, custom I/O callbacks and flags from the main context */
    av_dict_copy(&avf2->metadata, avf->metadata, 0);
    avf2->opaque   = avf->opaque;
    avf2->io_open  = avf->io_open;
    avf2->io_close = avf->io_close;
    avf2->interrupt_callback = avf->interrupt_callback;
    avf2->flags = avf->flags;

    tee_slave->stream_map = av_calloc(avf->nb_streams, sizeof(*tee_slave->stream_map));
    if (!tee_slave->stream_map) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* Map each input stream to a slave stream index, or -1 when the "select"
     * specifier list does not match it. */
    stream_count = 0;
    for (i = 0; i < avf->nb_streams; i++) {
        st = avf->streams[i];
        if (select) {
            tmp_select = av_strdup(select);  // av_strtok is destructive so we regenerate it in each loop
            if (!tmp_select) {
                ret = AVERROR(ENOMEM);
                goto end;
            }
            fullret = 0;
            first_subselect = tmp_select;
            next_subselect = NULL;
            /* intentional assignment in the condition: iterate the tokens */
            while (subselect = av_strtok(first_subselect, slave_select_sep, &next_subselect)) {
                first_subselect = NULL;

                ret = avformat_match_stream_specifier(avf, avf->streams[i], subselect);
                if (ret < 0) {
                    av_log(avf, AV_LOG_ERROR,
                           "Invalid stream specifier '%s' for output '%s'\n",
                           subselect, slave);
                    goto end;
                }
                if (ret != 0) {
                    fullret = 1; // match
                    break;
                }
            }
            av_freep(&tmp_select);

            if (fullret == 0) { /* no match */
                tee_slave->stream_map[i] = -1;
                continue;
            }
        }
        tee_slave->stream_map[i] = stream_count++;

        if (!(st2 = avformat_new_stream(avf2, NULL))) {
            ret = AVERROR(ENOMEM);
            goto end;
        }

        ret = ff_stream_encode_params_copy(st2, st);
        if (ret < 0)
            goto end;
    }

    ret = ff_format_output_open(avf2, filename, NULL);
    if (ret < 0) {
        av_log(avf, AV_LOG_ERROR, "Slave '%s': error opening: %s\n", slave,
               av_err2str(ret));
        goto end;
    }

    /* avformat_write_header consumes the options it recognizes */
    if ((ret = avformat_write_header(avf2, &options)) < 0) {
        av_log(avf, AV_LOG_ERROR, "Slave '%s': error writing header: %s\n",
               slave, av_err2str(ret));
        goto end;
    }
    tee_slave->header_written = 1;

    tee_slave->bsfs = av_calloc(avf2->nb_streams, sizeof(*tee_slave->bsfs));
    if (!tee_slave->bsfs) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* Attach user-requested bitstream filters: options look like
     * "bsfs<sep><stream spec>" where <sep> is one of slave_bsfs_spec_sep. */
    entry = NULL;
    while (entry = av_dict_get(options, "bsfs", NULL, AV_DICT_IGNORE_SUFFIX)) {
        const char *spec = entry->key + strlen("bsfs");
        if (*spec) {
            if (strspn(spec, slave_bsfs_spec_sep) != 1) {
                av_log(avf, AV_LOG_ERROR,
                       "Specifier separator in '%s' is '%c', but only characters '%s' "
                       "are allowed\n", entry->key, *spec, slave_bsfs_spec_sep);
                ret = AVERROR(EINVAL);
                goto end;
            }
            spec++; /* consume separator */
        }

        for (i = 0; i < avf2->nb_streams; i++) {
            ret = avformat_match_stream_specifier(avf2, avf2->streams[i], spec);
            if (ret < 0) {
                av_log(avf, AV_LOG_ERROR,
                       "Invalid stream specifier '%s' in bsfs option '%s' for slave "
                       "output '%s'\n", spec, entry->key, filename);
                goto end;
            }

            if (ret > 0) {
                av_log(avf, AV_LOG_DEBUG, "spec:%s bsfs:%s matches stream %d of slave "
                       "output '%s'\n", spec, entry->value, i, filename);
                if (tee_slave->bsfs[i]) {
                    /* first bsfs spec matching a stream wins */
                    av_log(avf, AV_LOG_WARNING,
                           "Duplicate bsfs specification associated to stream %d of slave "
                           "output '%s', filters will be ignored\n", i, filename);
                    continue;
                }
                ret = av_bsf_list_parse_str(entry->value, &tee_slave->bsfs[i]);
                if (ret < 0) {
                    av_log(avf, AV_LOG_ERROR,
                           "Error parsing bitstream filter sequence '%s' associated to "
                           "stream %d of slave output '%s'\n", entry->value, i, filename);
                    goto end;
                }
            }
        }

        /* remove the processed bsfs entry from the dictionary */
        av_dict_set(&options, entry->key, NULL, 0);
    }

    /* Every mapped stream gets a bitstream filter chain; streams without an
     * explicit one get a pass-through (null) filter. */
    for (i = 0; i < avf->nb_streams; i++){
        int target_stream = tee_slave->stream_map[i];
        if (target_stream < 0)
            continue;

        if (!tee_slave->bsfs[target_stream]) {
            /* Add pass-through bitstream filter */
            ret = av_bsf_get_null_filter(&tee_slave->bsfs[target_stream]);
            if (ret < 0) {
                av_log(avf, AV_LOG_ERROR,
                       "Failed to create pass-through bitstream filter: %s\n",
                       av_err2str(ret));
                goto end;
            }
        }

        tee_slave->bsfs[target_stream]->time_base_in = avf->streams[i]->time_base;
        ret = avcodec_parameters_copy(tee_slave->bsfs[target_stream]->par_in,
                                      avf->streams[i]->codecpar);
        if (ret < 0)
            goto end;

        ret = av_bsf_init(tee_slave->bsfs[target_stream]);
        if (ret < 0) {
            av_log(avf, AV_LOG_ERROR,
                   "Failed to initialize bitstream filter(s): %s\n", av_err2str(ret));
            goto end;
        }
    }

    /* anything still left in `options` was never recognized: hard error */
    if (options) {
        entry = NULL;
        while ((entry = av_dict_get(options, "", entry, AV_DICT_IGNORE_SUFFIX)))
            av_log(avf2, AV_LOG_ERROR, "Unknown option '%s'\n", entry->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto end;
    }

end:
    /* common cleanup for both success and failure paths */
    av_free(format);
    av_free(select);
    av_free(on_fail);
    av_dict_free(&options);
    av_freep(&tmp_select);
    return ret;
}
int ff_rtp_chain_mux_open(AVFormatContext **out, AVFormatContext *s, AVStream *st, URLContext *handle, int packet_size, int idx) { AVFormatContext *rtpctx = NULL; int ret; AVOutputFormat *rtp_format = av_guess_format("rtp", NULL, NULL); uint8_t *rtpflags; AVDictionary *opts = NULL; if (!rtp_format) { ret = AVERROR(ENOSYS); goto fail; } /* Allocate an AVFormatContext for each output stream */ rtpctx = avformat_alloc_context(); if (!rtpctx) { ret = AVERROR(ENOMEM); goto fail; } rtpctx->oformat = rtp_format; if (!avformat_new_stream(rtpctx, NULL)) { ret = AVERROR(ENOMEM); goto fail; } /* Pass the interrupt callback on */ rtpctx->interrupt_callback = s->interrupt_callback; /* Copy the max delay setting; the rtp muxer reads this. */ rtpctx->max_delay = s->max_delay; /* Copy other stream parameters. */ rtpctx->streams[0]->sample_aspect_ratio = st->sample_aspect_ratio; rtpctx->flags |= s->flags & AVFMT_FLAG_BITEXACT; /* Get the payload type from the codec */ if (st->id < RTP_PT_PRIVATE) rtpctx->streams[0]->id = ff_rtp_get_payload_type(s, st->codecpar, idx); else rtpctx->streams[0]->id = st->id; if (av_opt_get(s, "rtpflags", AV_OPT_SEARCH_CHILDREN, &rtpflags) >= 0) av_dict_set(&opts, "rtpflags", rtpflags, AV_DICT_DONT_STRDUP_VAL); /* Set the synchronized start time. */ rtpctx->start_time_realtime = s->start_time_realtime; avcodec_parameters_copy(rtpctx->streams[0]->codecpar, st->codecpar); rtpctx->streams[0]->time_base = st->time_base; if (handle) { ret = ffio_fdopen(&rtpctx->pb, handle); if (ret < 0) ffurl_close(handle); } else ret = ffio_open_dyn_packet_buf(&rtpctx->pb, packet_size); if (!ret) ret = avformat_write_header(rtpctx, &opts); av_dict_free(&opts); if (ret) { if (handle && rtpctx->pb) { avio_closep(&rtpctx->pb); } else if (rtpctx->pb) { ffio_free_dyn_buf(&rtpctx->pb); } avformat_free_context(rtpctx); return ret; } *out = rtpctx; return 0; fail: avformat_free_context(rtpctx); if (handle) ffurl_close(handle); return ret; }
int avfilter_init_str(AVFilterContext *filter, const char *args) { AVDictionary *options = NULL; AVDictionaryEntry *e; int ret = 0; if (args && *args) { if (!filter->filter->priv_class) { av_log(filter, AV_LOG_ERROR, "This filter does not take any " "options, but options were provided: %s.\n", args); return AVERROR(EINVAL); } #if FF_API_OLD_FILTER_OPTS if ( !strcmp(filter->filter->name, "format") || !strcmp(filter->filter->name, "noformat") || !strcmp(filter->filter->name, "frei0r") || !strcmp(filter->filter->name, "frei0r_src") || !strcmp(filter->filter->name, "ocv") || !strcmp(filter->filter->name, "pan") || !strcmp(filter->filter->name, "pp") || !strcmp(filter->filter->name, "aevalsrc")) { /* a hack for compatibility with the old syntax * replace colons with |s */ char *copy = av_strdup(args); char *p = copy; int nb_leading = 0; // number of leading colons to skip int deprecated = 0; if (!copy) { ret = AVERROR(ENOMEM); goto fail; } if (!strcmp(filter->filter->name, "frei0r") || !strcmp(filter->filter->name, "ocv")) nb_leading = 1; else if (!strcmp(filter->filter->name, "frei0r_src")) nb_leading = 3; while (nb_leading--) { p = strchr(p, ':'); if (!p) { p = copy + strlen(copy); break; } p++; } deprecated = strchr(p, ':') != NULL; if (!strcmp(filter->filter->name, "aevalsrc")) { deprecated = 0; while ((p = strchr(p, ':')) && p[1] != ':') { const char *epos = strchr(p + 1, '='); const char *spos = strchr(p + 1, ':'); const int next_token_is_opt = epos && (!spos || epos < spos); if (next_token_is_opt) { p++; break; } /* next token does not contain a '=', assume a channel expression */ deprecated = 1; *p++ = '|'; } if (p && *p == ':') { // double sep '::' found deprecated = 1; memmove(p, p + 1, strlen(p)); } } else while ((p = strchr(p, ':'))) *p++ = '|'; if (deprecated) av_log(filter, AV_LOG_WARNING, "This syntax is deprecated. 
Use " "'|' to separate the list items.\n"); av_log(filter, AV_LOG_DEBUG, "compat: called with args=[%s]\n", copy); ret = process_options(filter, &options, copy); av_freep(©); if (ret < 0) goto fail; #endif } else { ret = process_options(filter, &options, args); if (ret < 0) goto fail; } } ret = avfilter_init_dict(filter, &options); if (ret < 0) goto fail; if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) { av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key); ret = AVERROR_OPTION_NOT_FOUND; goto fail; } fail: av_dict_free(&options); return ret; }
static bool ffemu_init_video(struct ff_video_info *video, const struct ffemu_params *param) { #ifdef HAVE_X264RGB AVCodec *codec = NULL; if (g_settings.video.h264_record) { codec = avcodec_find_encoder_by_name("libx264rgb"); // Older versions of FFmpeg have RGB encoding in libx264. if (!codec) codec = avcodec_find_encoder_by_name("libx264"); } else codec = avcodec_find_encoder_by_name("ffv1"); #else AVCodec *codec = avcodec_find_encoder_by_name("ffv1"); #endif if (!codec) return false; video->encoder = codec; #if AV_HAVE_BIGENDIAN video->fmt = PIX_FMT_RGB555BE; #else video->fmt = PIX_FMT_RGB555LE; #endif video->pix_size = sizeof(uint16_t); if (param->rgb32) { video->fmt = PIX_FMT_RGB32; video->pix_size = sizeof(uint32_t); } #ifdef HAVE_X264RGB video->pix_fmt = g_settings.video.h264_record ? PIX_FMT_BGR24 : PIX_FMT_RGB32; #else video->pix_fmt = PIX_FMT_RGB32; #endif #ifdef HAVE_FFMPEG_ALLOC_CONTEXT3 video->codec = avcodec_alloc_context3(codec); #else video->codec = avcodec_alloc_context(); avcodec_get_context_defaults(video->codec); #endif video->codec->width = param->out_width; video->codec->height = param->out_height; video->codec->time_base = av_d2q(1.0 / param->fps, 1000000); // Arbitrary big number. video->codec->sample_aspect_ratio = av_d2q(param->aspect_ratio * param->out_height / param->out_width, 255); video->codec->pix_fmt = video->pix_fmt; #ifdef HAVE_FFMPEG_AVCODEC_OPEN2 AVDictionary *opts = NULL; #endif #ifdef HAVE_X264RGB if (g_settings.video.h264_record) { video->codec->thread_count = 3; av_dict_set(&opts, "qp", "0", 0); } else video->codec->thread_count = 2; #else video->codec->thread_count = 2; #endif #ifdef HAVE_FFMPEG_AVCODEC_OPEN2 if (avcodec_open2(video->codec, codec, &opts) != 0) #else if (avcodec_open(video->codec, codec) != 0) #endif return false; #ifdef HAVE_FFMPEG_AVCODEC_OPEN2 if (opts) av_dict_free(&opts); #endif // Allocate a big buffer :p ffmpeg API doesn't seem to give us some clues how big this buffer should be. 
video->outbuf_size = 1 << 23; video->outbuf = (uint8_t*)av_malloc(video->outbuf_size); size_t size = avpicture_get_size(video->pix_fmt, param->out_width, param->out_height); video->conv_frame_buf = (uint8_t*)av_malloc(size); video->conv_frame = avcodec_alloc_frame(); avpicture_fill((AVPicture*)video->conv_frame, video->conv_frame_buf, video->pix_fmt, param->out_width, param->out_height); return true; }
/* Open a video input (file or capture device) and its decoder.
 * Fills *video_input_file (zeroed first) with the demuxer context, stream
 * index, dimensions, SAR and pixel format, and sanitizes
 * video_data_conf->framerate/time_base from the stream info.
 * Returns 0 on success, -1 on any error (logged through GF_LOG). */
int dc_video_decoder_open(VideoInputFile *video_input_file, VideoDataConf *video_data_conf, int mode, int no_loop, int nb_consumers)
{
	s32 ret;
	u32 i;
	s32 open_res;
	AVInputFormat *in_fmt = NULL;
	AVDictionary *options = NULL;
	AVCodecContext *codec_ctx;
	AVCodec *codec;

	memset(video_input_file, 0, sizeof(VideoInputFile));

	if (video_data_conf->width > 0 && video_data_conf->height > 0) {
		char vres[16];
		snprintf(vres, sizeof(vres), "%dx%d", video_data_conf->width, video_data_conf->height);
		ret = av_dict_set(&options, "video_size", vres, 0);
		if (ret < 0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Could not set video size %s.\n", vres));
			av_dict_free(&options); /* bug fix: dictionary leaked on this path */
			return -1;
		}
	}

	if (video_data_conf->framerate > 0) {
		char vfr[16];
		snprintf(vfr, sizeof(vfr), "%d", video_data_conf->framerate);
		ret = av_dict_set(&options, "framerate", vfr, 0);
		if (ret < 0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Could not set video framerate %s.\n", vfr));
			av_dict_free(&options); /* bug fix: dictionary leaked on this path */
			return -1;
		}
	}

	if (strlen(video_data_conf->pixel_format)) {
		ret = av_dict_set(&options, "pixel_format", video_data_conf->pixel_format, 0);
		if (ret < 0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Could not set pixel format %s.\n", video_data_conf->pixel_format));
			av_dict_free(&options); /* bug fix: dictionary leaked on this path */
			return -1;
		}
	}

#ifndef WIN32
	if (strcmp(video_data_conf->v4l2f, "") != 0) {
		ret = av_dict_set(&options, "input_format", video_data_conf->v4l2f, 0);
		if (ret < 0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Could not set input format %s.\n", video_data_conf->v4l2f));
			av_dict_free(&options); /* bug fix: dictionary leaked on this path */
			return -1;
		}
	}
#endif

	if (video_data_conf->format && strcmp(video_data_conf->format, "") != 0) {
		in_fmt = av_find_input_format(video_data_conf->format);
		if (in_fmt == NULL) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot find the format %s.\n", video_data_conf->format));
			av_dict_free(&options); /* bug fix: dictionary leaked on this path */
			return -1;
		}
	}

	video_input_file->av_fmt_ctx = NULL;

	if (video_data_conf->demux_buffer_size) {
		char szBufSize[100];
		sprintf(szBufSize, "%d", video_data_conf->demux_buffer_size);
		ret = av_dict_set(&options, "buffer_size", szBufSize, 0);
		if (ret < 0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Could not set demuxer's input buffer size.\n"));
			av_dict_free(&options); /* bug fix: dictionary leaked on this path */
			return -1;
		}
	}

	/* Open video */
	open_res = avformat_open_input(&video_input_file->av_fmt_ctx, video_data_conf->filename, in_fmt, options ? &options : NULL);
	if ( (open_res < 0) && !stricmp(video_data_conf->filename, "screen-capture-recorder") ) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Buggy screen capture input (open failed with code %d), retrying without specifying resolution\n", open_res));
		av_dict_set(&options, "video_size", NULL, 0);
		open_res = avformat_open_input(&video_input_file->av_fmt_ctx, video_data_conf->filename, in_fmt, options ? &options : NULL);
	}

	if ( (open_res < 0) && options) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Error %d opening input - retrying without options\n", open_res));
		av_dict_free(&options);
		open_res = avformat_open_input(&video_input_file->av_fmt_ctx, video_data_conf->filename, in_fmt, NULL);
	}

	/* bug fix: entries not consumed by avformat_open_input were leaked on
	 * success; freeing here covers every remaining path (no-op when NULL) */
	av_dict_free(&options);

	if (open_res < 0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot open file %s\n", video_data_conf->filename));
		return -1;
	}

	/* Retrieve stream information */
	if (avformat_find_stream_info(video_input_file->av_fmt_ctx, NULL) < 0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot find stream information\n"));
		return -1;
	}

	av_dump_format(video_input_file->av_fmt_ctx, 0, video_data_conf->filename, 0);

	/* Find the first video stream */
	video_input_file->vstream_idx = -1;
	for (i = 0; i < video_input_file->av_fmt_ctx->nb_streams; i++) {
		if (video_input_file->av_fmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			video_input_file->vstream_idx = i;
			break;
		}
	}
	if (video_input_file->vstream_idx == -1) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot find a video stream\n"));
		return -1;
	}

	/* Get a pointer to the codec context for the video stream */
	codec_ctx = video_input_file->av_fmt_ctx->streams[video_input_file->vstream_idx]->codec;

	/* Find the decoder for the video stream */
	codec = avcodec_find_decoder(codec_ctx->codec_id);
	if (codec == NULL) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Codec is not supported.\n"));
		if (!video_input_file->av_fmt_ctx_ref_cnt)
			avformat_close_input(&video_input_file->av_fmt_ctx);
		return -1;
	}

	/* Open codec */
	if (avcodec_open2(codec_ctx, codec, NULL) < 0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot open codec.\n"));
		if (!video_input_file->av_fmt_ctx_ref_cnt)
			avformat_close_input(&video_input_file->av_fmt_ctx);
		return -1;
	}

	video_input_file->width = codec_ctx->width;
	video_input_file->height = codec_ctx->height;
	video_input_file->sar = codec_ctx->sample_aspect_ratio;
	video_input_file->pix_fmt = codec_ctx->pix_fmt;

	/* Some demuxers report a degenerate 1/N time base; keep the requested
	 * rate and patch the 1/1000000 case, otherwise derive the rate from it. */
	if (codec_ctx->time_base.num==1) {
		GF_LOG(GF_LOG_WARNING, GF_LOG_DASH, ("AVCTX give frame duration of %d/%d - keeping requested rate %d, but this may result in unexpected behaviour.\n", codec_ctx->time_base.num, codec_ctx->time_base.den, video_data_conf->framerate ));
		if (codec_ctx->time_base.den==1000000) {
			codec_ctx->time_base.num = codec_ctx->time_base.den / video_data_conf->framerate;
		}
	} else if (video_data_conf->framerate >= 0 && codec_ctx->time_base.num) {
		video_data_conf->framerate = codec_ctx->time_base.den / codec_ctx->time_base.num;
	}

	/* If the rate is still implausible, fall back to the stream's average
	 * frame rate, scaling down rates that were expressed in millihertz. */
	if (video_data_conf->framerate <= 1 || video_data_conf->framerate > 1000) {
		const int num = video_input_file->av_fmt_ctx->streams[video_input_file->vstream_idx]->avg_frame_rate.num;
		const int den = video_input_file->av_fmt_ctx->streams[video_input_file->vstream_idx]->avg_frame_rate.den == 0 ? 1 : video_input_file->av_fmt_ctx->streams[video_input_file->vstream_idx]->avg_frame_rate.den;
		video_data_conf->framerate = num / den;
		if (video_data_conf->framerate / 1000 != 0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Framerate %d was divided by 1000: %d\n", video_data_conf->framerate, video_data_conf->framerate/1000));
			video_data_conf->framerate = video_data_conf->framerate / 1000;
		}

		if (video_data_conf->framerate <= 1 || video_data_conf->framerate > 1000) {
			video_data_conf->framerate = num / den;
			if (video_data_conf->framerate / 1000 != 0) {
				video_data_conf->framerate = video_data_conf->framerate / 1000;
			}
		}
	}

	if (video_data_conf->framerate <= 1 || video_data_conf->framerate > 1000) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Invalid input framerate %d (AVCTX timebase is %d/%d).\n", video_data_conf->framerate, codec_ctx->time_base.num, codec_ctx->time_base.den));
		return -1;
	}

	video_data_conf->time_base = video_input_file->av_fmt_ctx->streams[video_input_file->vstream_idx]->time_base;
	video_input_file->mode = mode;
	video_input_file->no_loop = no_loop;
	video_input_file->nb_consumers = nb_consumers;
	return 0;
}