/**
 * Prepare a libavformat muxing context for remuxing the stream's encoded
 * audio into a container chosen from stream->dst_codec_type.
 *
 * On success stores the new format context, I/O buffer and I/O context in
 * *stream and returns true. On failure everything allocated here is
 * released and false is returned. Muxed bytes are delivered through the
 * caller-supplied write() callback, invoked with the given opaque pointer.
 */
bool stream_remux(stream_t *stream,
                  int (*write)(void *opaque, uint8_t *buf, int buf_size),
                  void *opaque)
{
  const char *format_name;
  AVOutputFormat *dst_format;
  AVFormatContext *dst_ctx;
  AVStream *dst_stream;
  uint8_t *dst_iobuf;
  AVIOContext *dst_ioctx;

  /* Map the musicd codec type onto a libavformat muxer name. */
  if (stream->dst_codec_type == CODEC_TYPE_MP3) {
    format_name = "mp3";
  } else if (stream->dst_codec_type == CODEC_TYPE_OGG_VORBIS) {
    format_name = "ogg";
  } else if (stream->dst_codec_type == CODEC_TYPE_FLAC) {
    format_name = "flac";
  } else if (stream->dst_codec_type == CODEC_TYPE_AAC) {
    format_name = "aac";
  } else if (stream->dst_codec_type == CODEC_TYPE_OPUS) {
    format_name = "opus";
  } else {
    return false;
  }

  dst_format = av_guess_format(format_name, NULL, NULL);
  if (!dst_format) {
    musicd_log(LOG_ERROR, "stream", "can't find encoder for %s",format_name);
    return false;
  }

  dst_ctx = avformat_alloc_context();
  /* FIX: avformat_alloc_context() returns NULL on OOM; the original
   * dereferenced it unconditionally. */
  if (!dst_ctx) {
    musicd_log(LOG_ERROR, "stream", "avformat_alloc_context failed");
    return false;
  }
  dst_ctx->oformat = dst_format;

  dst_stream = avformat_new_stream(dst_ctx, NULL);
  if (!dst_stream) {
    musicd_log(LOG_ERROR, "stream", "avformat_new_stream failed");
    avformat_free_context(dst_ctx);
    return false;
  }

  /* stringf() allocates; AV_DICT_DONT_STRDUP_VAL transfers ownership of
   * that buffer to the dictionary. */
  av_dict_set(&dst_ctx->metadata, "track",
              stringf("%02d", stream->track->track), AV_DICT_DONT_STRDUP_VAL);
  av_dict_set(&dst_ctx->metadata, "title", stream->track->title, 0);
  av_dict_set(&dst_ctx->metadata, "artist", stream->track->artist, 0);
  av_dict_set(&dst_ctx->metadata, "album", stream->track->album, 0);

  avcodec_copy_context(dst_stream->codec, stream->encoder);

  dst_iobuf = av_mallocz(4096);
  /* FIX: check the I/O buffer allocation too; avio_alloc_context() would
   * otherwise be handed a NULL buffer. */
  if (!dst_iobuf) {
    musicd_log(LOG_ERROR, "stream", "av_mallocz failed");
    avformat_free_context(dst_ctx);
    return false;
  }

  /* write_flag = 1: the context is used for output only. */
  dst_ioctx = avio_alloc_context(dst_iobuf, 4096, 1, opaque,
                                 NULL, write, NULL);
  if (!dst_ioctx) {
    musicd_log(LOG_ERROR, "stream", "avio_alloc_context failed");
    av_free(dst_iobuf);
    avformat_free_context(dst_ctx);
    return false;
  }

  dst_ctx->pb = dst_ioctx;
  stream->dst_ctx = dst_ctx;
  stream->dst_iobuf = dst_iobuf;
  stream->dst_ioctx = dst_ioctx;
  return true;
}
/* Store an integer in an AVDictionary by rendering it as decimal text.
 * Returns av_dict_set()'s result (>= 0 on success, AVERROR on failure). */
static int set_dict_int(AVDictionary **dict, const char *key, int val)
{
    char buf[256];

    snprintf(buf, sizeof(buf), "%d", val);
    return av_dict_set(dict, key, buf, 0);
}
/**
 * Open the media at `path`, read its stream info, and populate metadata
 * (duration, shoutcast data, codecs, rotation, framerate, filesize,
 * chapter count) into the State. Opens decoders for the first audio and
 * first video stream found.
 *
 * Returns SUCCESS, or FAILURE (and sets *ps to NULL) if the input cannot
 * be opened or probed.
 */
int set_data_source_l(State **ps, const char* path) {
    printf("set_data_source\n");
    int audio_index = -1;
    int video_index = -1;
    int i;
    State *state = *ps;

    printf("Path: %s\n", path);

    AVDictionary *options = NULL;
    av_dict_set(&options, "icy", "1", 0);
    av_dict_set(&options, "user-agent", "FFmpegMediaMetadataRetriever", 0);

    if (state->headers) {
        av_dict_set(&options, "headers", state->headers, 0);
    }

    if (state->offset > 0) {
        /* Pre-allocate the context so skip_initial_bytes takes effect. */
        state->pFormatCtx = avformat_alloc_context();
        state->pFormatCtx->skip_initial_bytes = state->offset;
    }

    if (avformat_open_input(&state->pFormatCtx, path, NULL, &options) != 0) {
        printf("Metadata could not be retrieved\n");
        /* FIX: avformat_open_input() leaves unconsumed entries in
         * `options`; the dictionary was leaked on this path. */
        av_dict_free(&options);
        *ps = NULL;
        return FAILURE;
    }
    /* FIX: free whatever options the demuxer did not consume — this was
     * leaked on every successful open. */
    av_dict_free(&options);

    if (avformat_find_stream_info(state->pFormatCtx, NULL) < 0) {
        printf("Metadata could not be retrieved\n");
        avformat_close_input(&state->pFormatCtx);
        *ps = NULL;
        return FAILURE;
    }

    set_duration(state->pFormatCtx);
    set_shoutcast_metadata(state->pFormatCtx);

    //av_dump_format(state->pFormatCtx, 0, path, 0);

    // Find the first audio and video stream
    for (i = 0; i < state->pFormatCtx->nb_streams; i++) {
        if (state->pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO
                && video_index < 0) {
            video_index = i;
        }
        if (state->pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
                && audio_index < 0) {
            audio_index = i;
        }
        set_codec(state->pFormatCtx, i);
    }

    if (audio_index >= 0) {
        stream_component_open(state, audio_index);
    }
    if (video_index >= 0) {
        stream_component_open(state, video_index);
    }

    /*if(state->video_stream < 0 || state->audio_stream < 0) {
        avformat_close_input(&state->pFormatCtx);
        *ps = NULL;
        return FAILURE;
    }*/

    set_rotation(state->pFormatCtx, state->audio_st, state->video_st);
    set_framerate(state->pFormatCtx, state->audio_st, state->video_st);
    set_filesize(state->pFormatCtx);
    set_chapter_count(state->pFormatCtx);

    /*printf("Found metadata\n");
    AVDictionaryEntry *tag = NULL;
    while ((tag = av_dict_get(state->pFormatCtx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
        printf("Key %s: \n", tag->key);
        printf("Value %s: \n", tag->value);
    }*/

    *ps = state;
    return SUCCESS;
}
/**
 * Initialize recording parameters, optionally overriding the defaults
 * from an FFmpeg-style config file.
 *
 * Returns true on success (or when `config` is NULL, leaving defaults),
 * false if the config file cannot be loaded or names an unknown pix_fmt.
 *
 * FIX: every address-of expression had been corrupted by an HTML-entity
 * round-trip ("&para" -> '¶', e.g. `¶ms->threads`); restored to
 * `&params->...` throughout so the code compiles again.
 */
static bool ffemu_init_config(struct ff_config_param *params, const char *config)
{
   params->out_pix_fmt      = PIX_FMT_NONE;
   params->scale_factor     = 1;
   params->threads          = 1;
   params->frame_drop_ratio = 1;

   if (!config)
      return true;

   params->conf = config_file_new(config);
   if (!params->conf)
   {
      RARCH_ERR("Failed to load FFmpeg config \"%s\".\n", config);
      return false;
   }

   config_get_array(params->conf, "vcodec", params->vcodec,
         sizeof(params->vcodec));
   config_get_array(params->conf, "acodec", params->acodec,
         sizeof(params->acodec));
   config_get_array(params->conf, "format", params->format,
         sizeof(params->format));

   config_get_uint(params->conf, "threads", &params->threads);

   if (!config_get_uint(params->conf, "frame_drop_ratio",
            &params->frame_drop_ratio) || !params->frame_drop_ratio)
      params->frame_drop_ratio = 1;

   if (!config_get_bool(params->conf, "audio_enable", &params->audio_enable))
      params->audio_enable = true;

   config_get_uint(params->conf, "sample_rate", &params->sample_rate);
   config_get_uint(params->conf, "scale_factor", &params->scale_factor);

   /* The *_qscale flags record whether a global-quality override exists. */
   params->audio_qscale = config_get_int(params->conf, "audio_global_quality",
         &params->audio_global_quality);
   config_get_int(params->conf, "audio_bit_rate", &params->audio_bit_rate);
   params->video_qscale = config_get_int(params->conf, "video_global_quality",
         &params->video_global_quality);
   config_get_int(params->conf, "video_bit_rate", &params->video_bit_rate);

   char pix_fmt[64] = {0};
   if (config_get_array(params->conf, "pix_fmt", pix_fmt, sizeof(pix_fmt)))
   {
      params->out_pix_fmt = av_get_pix_fmt(pix_fmt);
      if (params->out_pix_fmt == PIX_FMT_NONE)
      {
         RARCH_ERR("Cannot find pix_fmt \"%s\".\n", pix_fmt);
         return false;
      }
   }

   /* Forward any "video_*" / "audio_*" keys verbatim to the encoders. */
   struct config_file_entry entry;
   if (!config_get_entry_list_head(params->conf, &entry))
      return true;

   do
   {
      if (strstr(entry.key, "video_") == entry.key)
      {
         const char *key = entry.key + strlen("video_");
         av_dict_set(&params->video_opts, key, entry.value, 0);
      }
      else if (strstr(entry.key, "audio_") == entry.key)
      {
         const char *key = entry.key + strlen("audio_");
         av_dict_set(&params->audio_opts, key, entry.value, 0);
      }
   } while (config_get_entry_list_next(&entry));

   return true;
}
/*
 * Parse the codec-specific initialization data found in a RealMedia MDPR
 * (media properties) chunk and configure the stream accordingly.
 *
 * Recognized payloads: ".ra\xfd" RealAudio headers, "LSD:" lossless audio,
 * a "logical-fileinfo" property table (stream is freed; name/value pairs
 * become container metadata), and "VIDO" video headers. Anything else is
 * logged and skipped. On return the read position is advanced past the
 * whole codec_data_size region regardless of how much was parsed.
 *
 * Returns 0 on success (including "unsupported but skipped"), negative
 * AVERROR on read failures.
 */
int ff_rm_read_mdpr_codecdata (AVFormatContext *s, AVIOContext *pb,
                               AVStream *st, RMStream *rst,
                               int codec_data_size, const uint8_t *mime)
{
    unsigned int v;
    int size;
    int64_t codec_pos;
    int ret;

    avpriv_set_pts_info(st, 64, 1, 1000);
    /* Remember where the codec data starts so the tail can be skipped. */
    codec_pos = avio_tell(pb);
    v = avio_rb32(pb);

    if (v == MKTAG(0xfd, 'a', 'r', '.')) {
        /* ra type header */
        if (rm_read_audio_stream_info(s, pb, st, rst, 0))
            return -1;
    } else if (v == MKBETAG('L', 'S', 'D', ':')) {
        /* Lossless audio: the tag itself is part of the extradata, so
         * rewind the 4 bytes just read before copying it. */
        avio_seek(pb, -4, SEEK_CUR);
        if ((ret = rm_read_extradata(pb, st->codec, codec_data_size)) < 0)
            return ret;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_tag  = AV_RL32(st->codec->extradata);
        st->codec->codec_id   = ff_codec_get_id(ff_rm_codec_tags,
                                                st->codec->codec_tag);
    } else if(mime && !strcmp(mime, "logical-fileinfo")){
        /* Property table: not a real media stream, so drop the stream and
         * import string properties as container-level metadata. */
        int stream_count, rule_count, property_count, i;
        ff_free_stream(s, st);
        if (avio_rb16(pb) != 0) {
            av_log(s, AV_LOG_WARNING, "Unsupported version\n");
            goto skip;
        }
        stream_count = avio_rb16(pb);
        avio_skip(pb, 6*stream_count);
        rule_count = avio_rb16(pb);
        avio_skip(pb, 2*rule_count);
        property_count = avio_rb16(pb);
        for(i=0; i<property_count; i++){
            uint8_t name[128], val[128];
            avio_rb32(pb);
            if (avio_rb16(pb) != 0) {
                av_log(s, AV_LOG_WARNING,
                       "Unsupported Name value property version\n");
                goto skip; //FIXME skip just this one
            }
            get_str8(pb, name, sizeof(name));
            switch(avio_rb32(pb)) {
            case 2: /* type 2 = string property */
                get_strl(pb, val, sizeof(val), avio_rb16(pb));
                av_dict_set(&s->metadata, name, val, 0);
                break;
            default:
                avio_skip(pb, avio_rb16(pb));
            }
        }
    } else {
        int fps;
        if (avio_rl32(pb) != MKTAG('V', 'I', 'D', 'O')) {
        fail1:
            av_log(s, AV_LOG_WARNING, "Unsupported stream type %08x\n", v);
            goto skip;
        }
        st->codec->codec_tag = avio_rl32(pb);
        st->codec->codec_id  = ff_codec_get_id(ff_rm_codec_tags,
                                               st->codec->codec_tag);
        av_dlog(s, "%X %X\n", st->codec->codec_tag,
                MKTAG('R', 'V', '2', '0'));
        if (st->codec->codec_id == AV_CODEC_ID_NONE)
            goto fail1;
        st->codec->width  = avio_rb16(pb);
        st->codec->height = avio_rb16(pb);
        avio_skip(pb, 2); // looks like bits per sample
        avio_skip(pb, 4); // always zero?
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->need_parsing = AVSTREAM_PARSE_TIMESTAMPS;
        /* 16.16 fixed-point frames per second. */
        fps = avio_rb32(pb);
        if ((ret = rm_read_extradata(pb, st->codec,
                codec_data_size - (avio_tell(pb) - codec_pos))) < 0)
            return ret;
        av_reduce(&st->avg_frame_rate.den, &st->avg_frame_rate.num,
                  0x10000, fps, (1 << 30) - 1);
#if FF_API_R_FRAME_RATE
        st->r_frame_rate = st->avg_frame_rate;
#endif
    }

skip:
    /* skip codec info */
    size = avio_tell(pb) - codec_pos;
    avio_skip(pb, codec_data_size - size);

    return 0;
}
/**
 * Parse one configuration directive inside a <Stream>...</Stream> section.
 *
 * `cmd` is the directive name, `*p` points at its arguments, and
 * `*pstream` carries the stream being built ("<Stream" allocates it,
 * "</Stream>" finalizes it and resets *pstream to NULL).
 *
 * Returns 0 on success (per-directive problems are reported via
 * ERROR()/WARNING() into the config), or AVERROR(ENOMEM) on allocation
 * failure after freeing the accumulated per-stream state.
 *
 * Fixes relative to the previous revision:
 *  - "VideoTag" stored the literal string "arg" instead of the parsed
 *    tag value in `arg`.
 *  - The AVPreset branches tested `!preset` (address of a local, never
 *    NULL) instead of `!*preset`, so av_strdup() failure went undetected.
 */
static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd,
                                        const char **p, int line_num,
                                        FFServerStream **pstream)
{
    char arg[1024], arg2[1024];
    FFServerStream *stream;
    int val;

    av_assert0(pstream);
    stream = *pstream;

    if (!av_strcasecmp(cmd, "<Stream")) {
        char *q;
        FFServerStream *s;
        stream = av_mallocz(sizeof(FFServerStream));
        if (!stream)
            return AVERROR(ENOMEM);
        config->dummy_ctx = avcodec_alloc_context3(NULL);
        if (!config->dummy_ctx) {
            av_free(stream);
            return AVERROR(ENOMEM);
        }
        ffserver_get_arg(stream->filename, sizeof(stream->filename), p);
        q = strrchr(stream->filename, '>');
        if (q)
            *q = '\0';
        for (s = config->first_stream; s; s = s->next) {
            if (!strcmp(stream->filename, s->filename))
                ERROR("Stream '%s' already registered\n", s->filename);
        }
        stream->fmt = ffserver_guess_format(NULL, stream->filename, NULL);
        if (stream->fmt) {
            config->audio_id = stream->fmt->audio_codec;
            config->video_id = stream->fmt->video_codec;
        } else {
            config->audio_id = AV_CODEC_ID_NONE;
            config->video_id = AV_CODEC_ID_NONE;
        }
        *pstream = stream;
        return 0;
    }

    av_assert0(stream);

    if (!av_strcasecmp(cmd, "Feed")) {
        FFServerStream *sfeed;
        ffserver_get_arg(arg, sizeof(arg), p);
        sfeed = config->first_feed;
        while (sfeed) {
            if (!strcmp(sfeed->filename, arg))
                break;
            sfeed = sfeed->next_feed;
        }
        if (!sfeed)
            ERROR("Feed with name '%s' for stream '%s' is not defined\n",
                  arg, stream->filename);
        else
            stream->feed = sfeed;
    } else if (!av_strcasecmp(cmd, "Format")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        if (!strcmp(arg, "status")) {
            stream->stream_type = STREAM_TYPE_STATUS;
            stream->fmt = NULL;
        } else {
            stream->stream_type = STREAM_TYPE_LIVE;
            /* JPEG cannot be used here, so use single frame MJPEG */
            if (!strcmp(arg, "jpeg"))
                strcpy(arg, "mjpeg");
            stream->fmt = ffserver_guess_format(arg, NULL, NULL);
            if (!stream->fmt)
                ERROR("Unknown Format: %s\n", arg);
        }
        if (stream->fmt) {
            config->audio_id = stream->fmt->audio_codec;
            config->video_id = stream->fmt->video_codec;
        }
    } else if (!av_strcasecmp(cmd, "InputFormat")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        stream->ifmt = av_find_input_format(arg);
        if (!stream->ifmt)
            ERROR("Unknown input format: %s\n", arg);
    } else if (!av_strcasecmp(cmd, "FaviconURL")) {
        if (stream->stream_type == STREAM_TYPE_STATUS)
            ffserver_get_arg(stream->feed_filename,
                             sizeof(stream->feed_filename), p);
        else
            ERROR("FaviconURL only permitted for status streams\n");
    } else if (!av_strcasecmp(cmd, "Author")    ||
               !av_strcasecmp(cmd, "Comment")   ||
               !av_strcasecmp(cmd, "Copyright") ||
               !av_strcasecmp(cmd, "Title")) {
        char key[32];
        int i;
        ffserver_get_arg(arg, sizeof(arg), p);
        for (i = 0; i < strlen(cmd); i++)
            key[i] = av_tolower(cmd[i]);
        key[i] = 0;
        WARNING("'%s' option in configuration file is deprecated, "
                "use 'Metadata %s VALUE' instead\n", cmd, key);
        if (av_dict_set(&stream->metadata, key, arg, 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "Metadata")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_get_arg(arg2, sizeof(arg2), p);
        if (av_dict_set(&stream->metadata, arg, arg2, 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "Preroll")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        stream->prebuffer = atof(arg) * 1000;
    } else if (!av_strcasecmp(cmd, "StartSendOnKey")) {
        stream->send_on_key = 1;
    } else if (!av_strcasecmp(cmd, "AudioCodec")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->audio_id = opt_codec(arg, AVMEDIA_TYPE_AUDIO);
        if (config->audio_id == AV_CODEC_ID_NONE)
            ERROR("Unknown AudioCodec: %s\n", arg);
    } else if (!av_strcasecmp(cmd, "VideoCodec")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_id = opt_codec(arg, AVMEDIA_TYPE_VIDEO);
        if (config->video_id == AV_CODEC_ID_NONE)
            ERROR("Unknown VideoCodec: %s\n", arg);
    } else if (!av_strcasecmp(cmd, "MaxTime")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        stream->max_time = atof(arg) * 1000;
    } else if (!av_strcasecmp(cmd, "AudioBitRate")) {
        float f;
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_set_float_param(&f, arg, 1000, 0, FLT_MAX, config, line_num,
                                 "Invalid %s: %s\n", cmd, arg);
        if (av_dict_set_int(&config->audio_conf, cmd, lrintf(f), 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "AudioChannels")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_set_int_param(NULL, arg, 0, 1, 8, config, line_num,
                               "Invalid %s: %s, valid range is 1-8.",
                               cmd, arg);
        if (av_dict_set(&config->audio_conf, cmd, arg, 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "AudioSampleRate")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_set_int_param(NULL, arg, 0, 0, INT_MAX, config, line_num,
                               "Invalid %s: %s", cmd, arg);
        if (av_dict_set(&config->audio_conf, cmd, arg, 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "VideoBitRateRange")) {
        int minrate, maxrate;
        ffserver_get_arg(arg, sizeof(arg), p);
        if (sscanf(arg, "%d-%d", &minrate, &maxrate) == 2) {
            if (av_dict_set_int(&config->video_conf,
                                "VideoBitRateRangeMin", minrate, 0) < 0 ||
                av_dict_set_int(&config->video_conf,
                                "VideoBitRateRangeMax", maxrate, 0) < 0)
                goto nomem;
        } else
            ERROR("Incorrect format for VideoBitRateRange -- should be "
                  "<min>-<max>: %s\n", arg);
    } else if (!av_strcasecmp(cmd, "Debug")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_set_int_param(NULL, arg, 0, INT_MIN, INT_MAX, config,
                               line_num, "Invalid %s: %s", cmd, arg);
        if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "Strict")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_set_int_param(NULL, arg, 0, INT_MIN, INT_MAX, config,
                               line_num, "Invalid %s: %s", cmd, arg);
        if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "VideoBufferSize")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_set_int_param(NULL, arg, 8*1024, 0, INT_MAX, config,
                               line_num, "Invalid %s: %s", cmd, arg);
        if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "VideoBitRateTolerance")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_set_int_param(NULL, arg, 1000, INT_MIN, INT_MAX, config,
                               line_num, "Invalid %s: %s", cmd, arg);
        if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "VideoBitRate")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_set_int_param(NULL, arg, 1000, 0, INT_MAX, config,
                               line_num, "Invalid %s: %s", cmd, arg);
        if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "VideoSize")) {
        int ret, w, h;
        ffserver_get_arg(arg, sizeof(arg), p);
        ret = av_parse_video_size(&w, &h, arg);
        if (ret < 0)
            ERROR("Invalid video size '%s'\n", arg);
        else if ((w % 16) || (h % 16))
            ERROR("Image size must be a multiple of 16\n");
        if (av_dict_set_int(&config->video_conf,
                            "VideoSizeWidth", w, 0) < 0 ||
            av_dict_set_int(&config->video_conf,
                            "VideoSizeHeight", h, 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "VideoFrameRate")) {
        AVRational frame_rate;
        ffserver_get_arg(arg, sizeof(arg), p);
        if (av_parse_video_rate(&frame_rate, arg) < 0) {
            ERROR("Incorrect frame rate: %s\n", arg);
        } else {
            if (av_dict_set_int(&config->video_conf, "VideoFrameRateNum",
                                frame_rate.num, 0) < 0 ||
                av_dict_set_int(&config->video_conf, "VideoFrameRateDen",
                                frame_rate.den, 0) < 0)
                goto nomem;
        }
    } else if (!av_strcasecmp(cmd, "PixelFormat")) {
        enum AVPixelFormat pix_fmt;
        ffserver_get_arg(arg, sizeof(arg), p);
        pix_fmt = av_get_pix_fmt(arg);
        if (pix_fmt == AV_PIX_FMT_NONE)
            ERROR("Unknown pixel format: %s\n", arg);
        if (av_dict_set_int(&config->video_conf, cmd, pix_fmt, 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "VideoGopSize")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_set_int_param(NULL, arg, 0, INT_MIN, INT_MAX, config,
                               line_num, "Invalid %s: %s", cmd, arg);
        if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "VideoIntraOnly")) {
        if (av_dict_set(&config->video_conf, cmd, "1", 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "VideoHighQuality")) {
        if (av_dict_set(&config->video_conf, cmd, "", 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "Video4MotionVector")) {
        if (av_dict_set(&config->video_conf, cmd, "", 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "AVOptionVideo") ||
               !av_strcasecmp(cmd, "AVOptionAudio")) {
        int ret;
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_get_arg(arg2, sizeof(arg2), p);
        if (!av_strcasecmp(cmd, "AVOptionVideo"))
            ret = ffserver_save_avoption(arg, arg2, &config->video_opts,
                                         AV_OPT_FLAG_VIDEO_PARAM,
                                         config, line_num);
        else
            ret = ffserver_save_avoption(arg, arg2, &config->audio_opts,
                                         AV_OPT_FLAG_AUDIO_PARAM,
                                         config, line_num);
        if (ret < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "AVPresetVideo") ||
               !av_strcasecmp(cmd, "AVPresetAudio")) {
        char **preset = NULL;
        ffserver_get_arg(arg, sizeof(arg), p);
        if (!av_strcasecmp(cmd, "AVPresetVideo")) {
            preset = &config->video_preset;
            ffserver_opt_preset(arg, NULL, 0, NULL, &config->video_id);
        } else {
            preset = &config->audio_preset;
            ffserver_opt_preset(arg, NULL, 0, &config->audio_id, NULL);
        }
        *preset = av_strdup(arg);
        /* FIX: check the strdup result, not the address of the local. */
        if (!*preset)
            return AVERROR(ENOMEM);
    } else if (!av_strcasecmp(cmd, "VideoTag")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        if (strlen(arg) == 4) {
            /* FIX: store the parsed tag, not the literal string "arg". */
            if (av_dict_set(&config->video_conf, "VideoTag", arg, 0) < 0)
                goto nomem;
        }
    } else if (!av_strcasecmp(cmd, "BitExact")) {
        if (av_dict_set(&config->video_conf, cmd, "", 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "DctFastint")) {
        if (av_dict_set(&config->video_conf, cmd, "", 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "IdctSimple")) {
        if (av_dict_set(&config->video_conf, cmd, "", 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "Qscale")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "VideoQDiff")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_set_int_param(NULL, arg, 0, 1, 31, config, line_num,
                               "%s out of range\n", cmd);
        if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "VideoQMax")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_set_int_param(NULL, arg, 0, 1, 31, config, line_num,
                               "%s out of range\n", cmd);
        if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "VideoQMin")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_set_int_param(NULL, arg, 0, 1, 31, config, line_num,
                               "%s out of range\n", cmd);
        if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "LumiMask")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_set_float_param(NULL, arg, 0, -FLT_MAX, FLT_MAX, config,
                                 line_num, "Invalid %s: %s", cmd, arg);
        if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "DarkMask")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_set_float_param(NULL, arg, 0, -FLT_MAX, FLT_MAX, config,
                                 line_num, "Invalid %s: %s", cmd, arg);
        if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0)
            goto nomem;
    } else if (!av_strcasecmp(cmd, "NoVideo")) {
        config->video_id = AV_CODEC_ID_NONE;
    } else if (!av_strcasecmp(cmd, "NoAudio")) {
        config->audio_id = AV_CODEC_ID_NONE;
    } else if (!av_strcasecmp(cmd, "ACL")) {
        ffserver_parse_acl_row(stream, NULL, NULL, *p, config->filename,
                               line_num);
    } else if (!av_strcasecmp(cmd, "DynamicACL")) {
        ffserver_get_arg(stream->dynamic_acl, sizeof(stream->dynamic_acl), p);
    } else if (!av_strcasecmp(cmd, "RTSPOption")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        av_freep(&stream->rtsp_option);
        stream->rtsp_option = av_strdup(arg);
    } else if (!av_strcasecmp(cmd, "MulticastAddress")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        if (resolve_host(&stream->multicast_ip, arg))
            ERROR("Invalid host/IP address: %s\n", arg);
        stream->is_multicast = 1;
        stream->loop = 1; /* default is looping */
    } else if (!av_strcasecmp(cmd, "MulticastPort")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_set_int_param(&val, arg, 0, 1, 65535, config, line_num,
                               "Invalid MulticastPort: %s\n", arg);
        stream->multicast_port = val;
    } else if (!av_strcasecmp(cmd, "MulticastTTL")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_set_int_param(&val, arg, 0, INT_MIN, INT_MAX, config,
                               line_num, "Invalid MulticastTTL: %s\n", arg);
        stream->multicast_ttl = val;
    } else if (!av_strcasecmp(cmd, "NoLoop")) {
        stream->loop = 0;
    } else if (!av_strcasecmp(cmd, "</Stream>")) {
        /* Section end: build the encoder contexts from the accumulated
         * per-stream dictionaries, then release all temporary state. */
        if (stream->feed && stream->fmt && strcmp(stream->fmt->name, "ffm")) {
            if (config->audio_id != AV_CODEC_ID_NONE) {
                AVCodecContext *audio_enc =
                    avcodec_alloc_context3(avcodec_find_encoder(config->audio_id));
                if (config->audio_preset &&
                    ffserver_opt_preset(arg, audio_enc,
                                        AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_ENCODING_PARAM,
                                        NULL, NULL) < 0)
                    ERROR("Could not apply preset '%s'\n", arg);
                ffserver_apply_stream_config(audio_enc, config->audio_conf,
                                             &config->audio_opts);
                add_codec(stream, audio_enc);
            }
            if (config->video_id != AV_CODEC_ID_NONE) {
                AVCodecContext *video_enc =
                    avcodec_alloc_context3(avcodec_find_encoder(config->video_id));
                if (config->video_preset &&
                    ffserver_opt_preset(arg, video_enc,
                                        AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_ENCODING_PARAM,
                                        NULL, NULL) < 0)
                    ERROR("Could not apply preset '%s'\n", arg);
                ffserver_apply_stream_config(video_enc, config->video_conf,
                                             &config->video_opts);
                add_codec(stream, video_enc);
            }
        }
        av_dict_free(&config->video_opts);
        av_dict_free(&config->video_conf);
        av_dict_free(&config->audio_opts);
        av_dict_free(&config->audio_conf);
        av_freep(&config->video_preset);
        av_freep(&config->audio_preset);
        avcodec_free_context(&config->dummy_ctx);
        *pstream = NULL;
    } else if (!av_strcasecmp(cmd, "File") ||
               !av_strcasecmp(cmd, "ReadOnlyFile")) {
        ffserver_get_arg(stream->feed_filename,
                         sizeof(stream->feed_filename), p);
    } else {
        ERROR("Invalid entry '%s' inside <Stream></Stream>\n", cmd);
    }

    return 0;

nomem:
    av_log(NULL, AV_LOG_ERROR, "Out of memory. Aborting.\n");
    av_dict_free(&config->video_opts);
    av_dict_free(&config->video_conf);
    av_dict_free(&config->audio_opts);
    av_dict_free(&config->audio_conf);
    av_freep(&config->video_preset);
    av_freep(&config->audio_preset);
    avcodec_free_context(&config->dummy_ctx);
    return AVERROR(ENOMEM);
}
/********************************************************************** * avformatInit ********************************************************************** * Allocates hb_mux_data_t structures, create file and write headers *********************************************************************/ static int avformatInit( hb_mux_object_t * m ) { hb_job_t * job = m->job; hb_audio_t * audio; hb_mux_data_t * track; int meta_mux; int max_tracks; int ii, ret; const char *muxer_name = NULL; uint8_t default_track_flag = 1; uint8_t need_fonts = 0; char *lang; max_tracks = 1 + hb_list_count( job->list_audio ) + hb_list_count( job->list_subtitle ); m->tracks = calloc(max_tracks, sizeof(hb_mux_data_t*)); m->oc = avformat_alloc_context(); if (m->oc == NULL) { hb_error( "Could not initialize avformat context." ); goto error; } AVDictionary * av_opts = NULL; switch (job->mux) { case HB_MUX_AV_MP4: m->time_base.num = 1; m->time_base.den = 90000; if( job->ipod_atom ) muxer_name = "ipod"; else muxer_name = "mp4"; meta_mux = META_MUX_MP4; av_dict_set(&av_opts, "brand", "mp42", 0); if (job->mp4_optimize) av_dict_set(&av_opts, "movflags", "faststart+disable_chpl", 0); else av_dict_set(&av_opts, "movflags", "+disable_chpl", 0); break; case HB_MUX_AV_MKV: // libavformat is essentially hard coded such that it only // works with a timebase of 1/1000 m->time_base.num = 1; m->time_base.den = 1000; muxer_name = "matroska"; meta_mux = META_MUX_MKV; break; default: { hb_error("Invalid Mux %x", job->mux); goto error; } } m->oc->oformat = av_guess_format(muxer_name, NULL, NULL); if(m->oc->oformat == NULL) { hb_error("Could not guess output format %s", muxer_name); goto error; } av_strlcpy(m->oc->filename, job->file, sizeof(m->oc->filename)); ret = avio_open2(&m->oc->pb, job->file, AVIO_FLAG_WRITE, &m->oc->interrupt_callback, NULL); if( ret < 0 ) { hb_error( "avio_open2 failed, errno %d", ret); goto error; } /* Video track */ track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) ); 
job->mux_data = track; track->type = MUX_TYPE_VIDEO; track->prev_chapter_tc = AV_NOPTS_VALUE; track->st = avformat_new_stream(m->oc, NULL); if (track->st == NULL) { hb_error("Could not initialize video stream"); goto error; } track->st->time_base = m->time_base; avcodec_get_context_defaults3(track->st->codec, NULL); track->st->codec->codec_type = AVMEDIA_TYPE_VIDEO; track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; uint8_t *priv_data = NULL; int priv_size = 0; switch (job->vcodec) { case HB_VCODEC_X264: case HB_VCODEC_QSV_H264: track->st->codec->codec_id = AV_CODEC_ID_H264; /* Taken from x264 muxers.c */ priv_size = 5 + 1 + 2 + job->config.h264.sps_length + 1 + 2 + job->config.h264.pps_length; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("H.264 extradata: malloc failure"); goto error; } priv_data[0] = 1; priv_data[1] = job->config.h264.sps[1]; /* AVCProfileIndication */ priv_data[2] = job->config.h264.sps[2]; /* profile_compat */ priv_data[3] = job->config.h264.sps[3]; /* AVCLevelIndication */ priv_data[4] = 0xff; // nalu size length is four bytes priv_data[5] = 0xe1; // one sps priv_data[6] = job->config.h264.sps_length >> 8; priv_data[7] = job->config.h264.sps_length; memcpy(priv_data+8, job->config.h264.sps, job->config.h264.sps_length); priv_data[8+job->config.h264.sps_length] = 1; // one pps priv_data[9+job->config.h264.sps_length] = job->config.h264.pps_length >> 8; priv_data[10+job->config.h264.sps_length] = job->config.h264.pps_length; memcpy(priv_data+11+job->config.h264.sps_length, job->config.h264.pps, job->config.h264.pps_length ); break; case HB_VCODEC_FFMPEG_MPEG4: track->st->codec->codec_id = AV_CODEC_ID_MPEG4; if (job->config.mpeg4.length != 0) { priv_size = job->config.mpeg4.length; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("MPEG4 extradata: malloc failure"); goto error; } memcpy(priv_data, job->config.mpeg4.bytes, priv_size); } break; case HB_VCODEC_FFMPEG_MPEG2: track->st->codec->codec_id = 
AV_CODEC_ID_MPEG2VIDEO; if (job->config.mpeg4.length != 0) { priv_size = job->config.mpeg4.length; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("MPEG2 extradata: malloc failure"); goto error; } memcpy(priv_data, job->config.mpeg4.bytes, priv_size); } break; case HB_VCODEC_FFMPEG_VP8: track->st->codec->codec_id = AV_CODEC_ID_VP8; priv_data = NULL; priv_size = 0; break; case HB_VCODEC_THEORA: { track->st->codec->codec_id = AV_CODEC_ID_THEORA; int size = 0; ogg_packet *ogg_headers[3]; for (ii = 0; ii < 3; ii++) { ogg_headers[ii] = (ogg_packet *)job->config.theora.headers[ii]; size += ogg_headers[ii]->bytes + 2; } priv_size = size; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("Theora extradata: malloc failure"); goto error; } size = 0; for(ii = 0; ii < 3; ii++) { AV_WB16(priv_data + size, ogg_headers[ii]->bytes); size += 2; memcpy(priv_data+size, ogg_headers[ii]->packet, ogg_headers[ii]->bytes); size += ogg_headers[ii]->bytes; } } break; case HB_VCODEC_X265: track->st->codec->codec_id = AV_CODEC_ID_HEVC; if (job->config.h265.headers_length > 0) { priv_size = job->config.h265.headers_length; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("H.265 extradata: malloc failure"); goto error; } memcpy(priv_data, job->config.h265.headers, priv_size); } break; default: hb_error("muxavformat: Unknown video codec: %x", job->vcodec); goto error; } track->st->codec->extradata = priv_data; track->st->codec->extradata_size = priv_size; track->st->sample_aspect_ratio.num = job->par.num; track->st->sample_aspect_ratio.den = job->par.den; track->st->codec->sample_aspect_ratio.num = job->par.num; track->st->codec->sample_aspect_ratio.den = job->par.den; track->st->codec->width = job->width; track->st->codec->height = job->height; track->st->disposition |= AV_DISPOSITION_DEFAULT; hb_rational_t vrate; if( job->pass_id == HB_PASS_ENCODE_2ND ) { hb_interjob_t * interjob = hb_interjob_get( job->h ); vrate = interjob->vrate; } 
else { vrate = job->vrate; } // If the vrate is 27000000, there's a good chance this is // a standard rate that we have in our hb_video_rates table. // Because of rounding errors and approximations made while // measuring framerate, the actual value may not be exact. So // we look for rates that are "close" and make an adjustment // to fps.den. if (vrate.num == 27000000) { const hb_rate_t *video_framerate = NULL; while ((video_framerate = hb_video_framerate_get_next(video_framerate)) != NULL) { if (abs(vrate.den - video_framerate->rate) < 10) { vrate.den = video_framerate->rate; break; } } } hb_reduce(&vrate.num, &vrate.den, vrate.num, vrate.den); if (job->mux == HB_MUX_AV_MP4) { // libavformat mp4 muxer requires that the codec time_base have the // same denominator as the stream time_base, it uses it for the // mdhd timescale. double scale = (double)track->st->time_base.den / vrate.num; track->st->codec->time_base.den = track->st->time_base.den; track->st->codec->time_base.num = vrate.den * scale; } else { track->st->codec->time_base.num = vrate.den; track->st->codec->time_base.den = vrate.num; } track->st->avg_frame_rate.num = vrate.num; track->st->avg_frame_rate.den = vrate.den; /* add the audio tracks */ for(ii = 0; ii < hb_list_count( job->list_audio ); ii++ ) { audio = hb_list_item( job->list_audio, ii ); track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) ); audio->priv.mux_data = track; track->type = MUX_TYPE_AUDIO; track->st = avformat_new_stream(m->oc, NULL); if (track->st == NULL) { hb_error("Could not initialize audio stream"); goto error; } avcodec_get_context_defaults3(track->st->codec, NULL); track->st->codec->codec_type = AVMEDIA_TYPE_AUDIO; track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; if (job->mux == HB_MUX_AV_MP4) { track->st->codec->time_base.num = audio->config.out.samples_per_frame; track->st->codec->time_base.den = audio->config.out.samplerate; track->st->time_base.num = 1; track->st->time_base.den = 
audio->config.out.samplerate; } else { track->st->codec->time_base = m->time_base; track->st->time_base = m->time_base; } priv_data = NULL; priv_size = 0; switch (audio->config.out.codec & HB_ACODEC_MASK) { case HB_ACODEC_DCA: case HB_ACODEC_DCA_HD: track->st->codec->codec_id = AV_CODEC_ID_DTS; break; case HB_ACODEC_AC3: track->st->codec->codec_id = AV_CODEC_ID_AC3; break; case HB_ACODEC_FFEAC3: track->st->codec->codec_id = AV_CODEC_ID_EAC3; break; case HB_ACODEC_FFTRUEHD: track->st->codec->codec_id = AV_CODEC_ID_TRUEHD; break; case HB_ACODEC_LAME: case HB_ACODEC_MP3: track->st->codec->codec_id = AV_CODEC_ID_MP3; break; case HB_ACODEC_VORBIS: { track->st->codec->codec_id = AV_CODEC_ID_VORBIS; int jj, size = 0; ogg_packet *ogg_headers[3]; for (jj = 0; jj < 3; jj++) { ogg_headers[jj] = (ogg_packet *)audio->priv.config.vorbis.headers[jj]; size += ogg_headers[jj]->bytes + 2; } priv_size = size; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("Vorbis extradata: malloc failure"); goto error; } size = 0; for(jj = 0; jj < 3; jj++) { AV_WB16(priv_data + size, ogg_headers[jj]->bytes); size += 2; memcpy(priv_data+size, ogg_headers[jj]->packet, ogg_headers[jj]->bytes); size += ogg_headers[jj]->bytes; } } break; case HB_ACODEC_FFFLAC: case HB_ACODEC_FFFLAC24: track->st->codec->codec_id = AV_CODEC_ID_FLAC; if (audio->priv.config.extradata.length) { priv_size = audio->priv.config.extradata.length; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("FLAC extradata: malloc failure"); goto error; } memcpy(priv_data, audio->priv.config.extradata.bytes, audio->priv.config.extradata.length); } break; case HB_ACODEC_FFAAC: case HB_ACODEC_CA_AAC: case HB_ACODEC_CA_HAAC: case HB_ACODEC_FDK_AAC: case HB_ACODEC_FDK_HAAC: track->st->codec->codec_id = AV_CODEC_ID_AAC; // libav mkv muxer expects there to be extradata for // AAC and will crash if it is NULL. So allocate extra // byte so that av_malloc does not return NULL when length // is 0. 
priv_size = audio->priv.config.extradata.length; priv_data = av_malloc(priv_size + 1); if (priv_data == NULL) { hb_error("AAC extradata: malloc failure"); goto error; } memcpy(priv_data, audio->priv.config.extradata.bytes, audio->priv.config.extradata.length); // AAC from pass-through source may be ADTS. // Therefore inserting "aac_adtstoasc" bitstream filter is // preferred. // The filter does nothing for non-ADTS bitstream. if (audio->config.out.codec == HB_ACODEC_AAC_PASS) { track->bitstream_filter = av_bitstream_filter_init("aac_adtstoasc"); } break; default: hb_error("muxavformat: Unknown audio codec: %x", audio->config.out.codec); goto error; } track->st->codec->extradata = priv_data; track->st->codec->extradata_size = priv_size; if( default_track_flag ) { track->st->disposition |= AV_DISPOSITION_DEFAULT; default_track_flag = 0; } lang = lookup_lang_code(job->mux, audio->config.lang.iso639_2 ); if (lang != NULL) { av_dict_set(&track->st->metadata, "language", lang, 0); } track->st->codec->sample_rate = audio->config.out.samplerate; if (audio->config.out.codec & HB_ACODEC_PASS_FLAG) { track->st->codec->channels = av_get_channel_layout_nb_channels(audio->config.in.channel_layout); track->st->codec->channel_layout = audio->config.in.channel_layout; } else { track->st->codec->channels = hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown); track->st->codec->channel_layout = hb_ff_mixdown_xlat(audio->config.out.mixdown, NULL); } char *name; if (audio->config.out.name == NULL) { switch (track->st->codec->channels) { case 1: name = "Mono"; break; case 2: name = "Stereo"; break; default: name = "Surround"; break; } } else { name = audio->config.out.name; } // Set audio track title av_dict_set(&track->st->metadata, "title", name, 0); if (job->mux == HB_MUX_AV_MP4) { // Some software (MPC, mediainfo) use hdlr description // for track title av_dict_set(&track->st->metadata, "handler", name, 0); } } char * subidx_fmt = "size: %dx%d\n" "org: %d, %d\n" "scale: 
100%%, 100%%\n" "alpha: 100%%\n" "smooth: OFF\n" "fadein/out: 50, 50\n" "align: OFF at LEFT TOP\n" "time offset: 0\n" "forced subs: %s\n" "palette: %06x, %06x, %06x, %06x, %06x, %06x, " "%06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x\n" "custom colors: OFF, tridx: 0000, " "colors: 000000, 000000, 000000, 000000\n"; int subtitle_default = -1; for( ii = 0; ii < hb_list_count( job->list_subtitle ); ii++ ) { hb_subtitle_t *subtitle = hb_list_item( job->list_subtitle, ii ); if( subtitle->config.dest == PASSTHRUSUB ) { if ( subtitle->config.default_track ) subtitle_default = ii; } } // Quicktime requires that at least one subtitle is enabled, // else it doesn't show any of the subtitles. // So check to see if any of the subtitles are flagged to be // the defualt. The default will the the enabled track, else // enable the first track. if (job->mux == HB_MUX_AV_MP4 && subtitle_default == -1) { subtitle_default = 0; } for( ii = 0; ii < hb_list_count( job->list_subtitle ); ii++ ) { hb_subtitle_t * subtitle; uint32_t rgb[16]; char subidx[2048]; int len; subtitle = hb_list_item( job->list_subtitle, ii ); if (subtitle->config.dest != PASSTHRUSUB) continue; track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) ); subtitle->mux_data = track; track->type = MUX_TYPE_SUBTITLE; track->st = avformat_new_stream(m->oc, NULL); if (track->st == NULL) { hb_error("Could not initialize subtitle stream"); goto error; } avcodec_get_context_defaults3(track->st->codec, NULL); track->st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE; track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; track->st->time_base = m->time_base; track->st->codec->time_base = m->time_base; track->st->codec->width = subtitle->width; track->st->codec->height = subtitle->height; priv_data = NULL; priv_size = 0; switch (subtitle->source) { case VOBSUB: { int jj; track->st->codec->codec_id = AV_CODEC_ID_DVD_SUBTITLE; for (jj = 0; jj < 16; jj++) rgb[jj] = hb_yuv2rgb(subtitle->palette[jj]); len = 
snprintf(subidx, 2048, subidx_fmt, subtitle->width, subtitle->height, 0, 0, "OFF", rgb[0], rgb[1], rgb[2], rgb[3], rgb[4], rgb[5], rgb[6], rgb[7], rgb[8], rgb[9], rgb[10], rgb[11], rgb[12], rgb[13], rgb[14], rgb[15]); priv_size = len + 1; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("VOBSUB extradata: malloc failure"); goto error; } memcpy(priv_data, subidx, priv_size); } break; case PGSSUB: { track->st->codec->codec_id = AV_CODEC_ID_HDMV_PGS_SUBTITLE; } break; case CC608SUB: case CC708SUB: case TX3GSUB: case SRTSUB: case UTF8SUB: case SSASUB: { if (job->mux == HB_MUX_AV_MP4) { track->st->codec->codec_id = AV_CODEC_ID_MOV_TEXT; } else { track->st->codec->codec_id = AV_CODEC_ID_SSA; need_fonts = 1; if (subtitle->extradata_size) { priv_size = subtitle->extradata_size; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("SSA extradata: malloc failure"); goto error; } memcpy(priv_data, subtitle->extradata, priv_size); } } } break; default: continue; } if (track->st->codec->codec_id == AV_CODEC_ID_MOV_TEXT) { // Build codec extradata for tx3g. // If we were using a libav codec to generate this data // this would (or should) be done for us. uint8_t properties[] = { 0x00, 0x00, 0x00, 0x00, // Display Flags 0x01, // Horiz. Justification 0xff, // Vert. 
Justification 0x00, 0x00, 0x00, 0xff, // Bg color 0x00, 0x00, 0x00, 0x00, // Default text box 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Reserved 0x00, 0x01, // Font ID 0x00, // Font face 0x18, // Font size 0xff, 0xff, 0xff, 0xff, // Fg color // Font table: 0x00, 0x00, 0x00, 0x12, // Font table size 'f','t','a','b', // Tag 0x00, 0x01, // Count 0x00, 0x01, // Font ID 0x05, // Font name length 'A','r','i','a','l' // Font name }; int width, height = 60; width = job->width * job->par.num / job->par.den; track->st->codec->width = width; track->st->codec->height = height; properties[14] = height >> 8; properties[15] = height & 0xff; properties[16] = width >> 8; properties[17] = width & 0xff; priv_size = sizeof(properties); priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("TX3G extradata: malloc failure"); goto error; } memcpy(priv_data, properties, priv_size); } track->st->codec->extradata = priv_data; track->st->codec->extradata_size = priv_size; if (ii == subtitle_default) { track->st->disposition |= AV_DISPOSITION_DEFAULT; } if (subtitle->config.default_track) { track->st->disposition |= AV_DISPOSITION_FORCED; } lang = lookup_lang_code(job->mux, subtitle->iso639_2 ); if (lang != NULL) { av_dict_set(&track->st->metadata, "language", lang, 0); } }
/*
 * Open the ffmpeg output container and create the video/audio streams for
 * the requested format/codec combination.
 *
 * Returns 1 on success (context->outfile is set), 0 on failure (an error
 * report is added to `reports`).
 *
 * Fixes over the previous version: the DV geometry/audio validation paths
 * returned without releasing the already-allocated AVFormatContext and the
 * options dictionary, and the stream/open/header failure paths leaked the
 * AVFormatContext. All failures now funnel through one cleanup label.
 */
static int start_ffmpeg_impl(FFMpegContext *context, struct RenderData *rd, int rectx, int recty, const char *suffix, ReportList *reports)
{
    /* Handle to the output file */
    AVFormatContext *of = NULL;
    AVOutputFormat *fmt;
    AVDictionary *opts = NULL;
    char name[256], error[1024];
    const char **exts;

    /* Ensure `error[0]` is defined even if a stream allocator fails without
     * writing a message into the buffer. */
    error[0] = '\0';

    /* Snapshot the render settings into the encoder context. */
    context->ffmpeg_type = rd->ffcodecdata.type;
    context->ffmpeg_codec = rd->ffcodecdata.codec;
    context->ffmpeg_audio_codec = rd->ffcodecdata.audio_codec;
    context->ffmpeg_video_bitrate = rd->ffcodecdata.video_bitrate;
    context->ffmpeg_audio_bitrate = rd->ffcodecdata.audio_bitrate;
    context->ffmpeg_gop_size = rd->ffcodecdata.gop_size;
    context->ffmpeg_autosplit = rd->ffcodecdata.flags & FFMPEG_AUTOSPLIT_OUTPUT;

    /* Determine the correct filename */
    ffmpeg_filepath_get(context, name, rd, context->ffmpeg_preview, suffix);
    PRINT("Starting output to %s(ffmpeg)...\n"
          " Using type=%d, codec=%d, audio_codec=%d,\n"
          " video_bitrate=%d, audio_bitrate=%d,\n"
          " gop_size=%d, autosplit=%d\n"
          " render width=%d, render height=%d\n",
          name, context->ffmpeg_type, context->ffmpeg_codec,
          context->ffmpeg_audio_codec, context->ffmpeg_video_bitrate,
          context->ffmpeg_audio_bitrate, context->ffmpeg_gop_size,
          context->ffmpeg_autosplit, rectx, recty);

    /* Map the output type to a container format via its file extension. */
    exts = get_file_extensions(context->ffmpeg_type);
    if (!exts) {
        BKE_report(reports, RPT_ERROR, "No valid formats found");
        return 0;
    }
    fmt = av_guess_format(NULL, exts[0], NULL);
    if (!fmt) {
        BKE_report(reports, RPT_ERROR, "No valid formats found");
        return 0;
    }

    of = avformat_alloc_context();
    if (!of) {
        BKE_report(reports, RPT_ERROR, "Error opening output file");
        return 0;
    }

    of->oformat = fmt;
    of->packet_size = rd->ffcodecdata.mux_packet_size;
    if (context->ffmpeg_audio_codec != AV_CODEC_ID_NONE) {
        ffmpeg_dict_set_int(&opts, "muxrate", rd->ffcodecdata.mux_rate);
    }
    else {
        av_dict_set(&opts, "muxrate", "0", 0);
    }
    /* NOTE(review): `opts` is populated here but avformat_write_header()
     * below is still called with NULL (matching the original behavior);
     * confirm whether these muxer options are meant to be applied. */
    ffmpeg_dict_set_int(&opts, "preload", (int)(0.5 * AV_TIME_BASE));
    of->max_delay = (int)(0.7 * AV_TIME_BASE);
    fmt->audio_codec = context->ffmpeg_audio_codec;
    BLI_strncpy(of->filename, name, sizeof(of->filename));

    /* set the codec to the user's selection */
    switch (context->ffmpeg_type) {
        case FFMPEG_AVI:
        case FFMPEG_MOV:
        case FFMPEG_MKV:
            fmt->video_codec = context->ffmpeg_codec;
            break;
        case FFMPEG_OGG:
            fmt->video_codec = AV_CODEC_ID_THEORA;
            break;
        case FFMPEG_DV:
            fmt->video_codec = AV_CODEC_ID_DVVIDEO;
            break;
        case FFMPEG_MPEG1:
            fmt->video_codec = AV_CODEC_ID_MPEG1VIDEO;
            break;
        case FFMPEG_MPEG2:
            fmt->video_codec = AV_CODEC_ID_MPEG2VIDEO;
            break;
        case FFMPEG_H264:
            fmt->video_codec = AV_CODEC_ID_H264;
            break;
        case FFMPEG_XVID:
            fmt->video_codec = AV_CODEC_ID_MPEG4;
            break;
        case FFMPEG_FLV:
            fmt->video_codec = AV_CODEC_ID_FLV1;
            break;
        case FFMPEG_MPEG4:
        default:
            fmt->video_codec = context->ffmpeg_codec;
            break;
    }

    /* DV has hard requirements on frame geometry. The previous version
     * returned from these branches without freeing `of` and `opts`. */
    if (fmt->video_codec == AV_CODEC_ID_DVVIDEO) {
        if (rectx != 720) {
            BKE_report(reports, RPT_ERROR, "Render width has to be 720 pixels for DV!");
            goto fail;
        }
        if (rd->frs_sec != 25 && recty != 480) {
            BKE_report(reports, RPT_ERROR, "Render height has to be 480 pixels for DV-NTSC!");
            goto fail;
        }
        if (rd->frs_sec == 25 && recty != 576) {
            BKE_report(reports, RPT_ERROR, "Render height has to be 576 pixels for DV-PAL!");
            goto fail;
        }
    }
    if (context->ffmpeg_type == FFMPEG_DV) {
        fmt->audio_codec = AV_CODEC_ID_PCM_S16LE;
        if (context->ffmpeg_audio_codec != AV_CODEC_ID_NONE &&
            rd->ffcodecdata.audio_mixrate != 48000 &&
            rd->ffcodecdata.audio_channels != 2)
        {
            BKE_report(reports, RPT_ERROR, "FFMPEG only supports 48khz / stereo audio for DV!");
            goto fail;
        }
    }

    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        context->video_stream = alloc_video_stream(context, rd, fmt->video_codec, of,
                                                   rectx, recty, error, sizeof(error));
        PRINT("alloc video stream %p\n", context->video_stream);
        if (!context->video_stream) {
            if (error[0])
                BKE_report(reports, RPT_ERROR, error);
            else
                BKE_report(reports, RPT_ERROR, "Error initializing video stream");
            goto fail;
        }
    }

    if (context->ffmpeg_audio_codec != AV_CODEC_ID_NONE) {
        context->audio_stream = alloc_audio_stream(context, rd, fmt->audio_codec, of,
                                                   error, sizeof(error));
        if (!context->audio_stream) {
            if (error[0])
                BKE_report(reports, RPT_ERROR, error);
            else
                BKE_report(reports, RPT_ERROR, "Error initializing audio stream");
            goto fail;
        }
    }

    /* Open the actual file unless the muxer writes through other means. */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&of->pb, name, AVIO_FLAG_WRITE) < 0) {
            BKE_report(reports, RPT_ERROR, "Could not open file for writing");
            goto fail;
        }
    }
    if (avformat_write_header(of, NULL) < 0) {
        BKE_report(reports, RPT_ERROR,
                   "Could not initialize streams, probably unsupported codec combination");
        avio_close(of->pb);
        goto fail;
    }

    context->outfile = of;
    av_dump_format(of, 0, name, 1);
    av_dict_free(&opts);

    return 1;

fail:
    /* Single cleanup path for every post-allocation failure: releases the
     * options dictionary and the format context (including any streams the
     * alloc_*_stream helpers attached to it). */
    av_dict_free(&opts);
    avformat_free_context(of);
    return 0;
}
/*
 * Demuxer read_header callback for HLS input.
 *
 * Parses the master playlist (and, when it only lists variants, each
 * variant playlist), computes the total duration for VOD streams (and for
 * live streams when DVR-style seeking is enabled), then opens a nested
 * demuxer per variant and mirrors its elementary streams onto the outer
 * AVFormatContext.
 *
 * Returns 0 on success or a negative AVERROR; on any failure the whole
 * variant list is torn down via free_variant_list().
 *
 * Fix over the previous version: the av_malloc() of the per-variant read
 * buffer was unchecked, so an allocation failure crashed inside
 * ffio_init_context() instead of returning AVERROR(ENOMEM).
 */
static int hls_read_header(AVFormatContext *s)
{
    HLSContext *c = s->priv_data;
    c->ctx = s;
    int ret = 0, i, j, stream_offset = 0;

    c->interrupt_callback = &s->interrupt_callback;

    if ((ret = parse_playlist(c, s->filename, NULL, s->pb)) < 0)
        goto fail;

    if (c->n_variants == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }

    /* If the playlist only contained variants, parse each individual
     * variant playlist. */
    if (c->n_variants > 1 || c->variants[0]->n_segments == 0) {
        for (i = 0; i < c->n_variants; i++) {
            struct variant *v = c->variants[i];
            if ((ret = parse_playlist(c, v->url, v, NULL)) < 0)
                goto fail;
        }
    }

    if (c->variants[0]->n_segments == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }

    /* If this isn't a live stream, calculate the total duration of the
     * stream. */
    if (c->variants[0]->finished) {
        double duration = 0;
        for (i = 0; i < c->variants[0]->n_segments; i++)
            duration += c->variants[0]->segments[i]->duration;
        s->duration = duration * AV_TIME_BASE;
    }
    /*
     * Add By Yen calculate the total duration for support DVR
     */
#if _SUPPORT_HLS_LIVE_SEEK_
    else {
        /* Live DVR: sum all but the last 3 segments (the live edge). */
        double duration = 0;
        for (i = 0; i < c->variants[0]->n_segments - 3; i++)
            duration += c->variants[0]->segments[i]->duration;
        s->duration = duration * AV_TIME_BASE;
    }
#endif

    /* Open the demuxer for each variant */
    for (i = 0; i < c->n_variants; i++) {
        struct variant *v = c->variants[i];
        AVInputFormat *in_fmt = NULL;
        char bitrate_str[20];
        if (v->n_segments == 0)
            continue;

        if (!(v->ctx = avformat_alloc_context())) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        v->index  = i;
        v->needed = 1;
        v->parent = s;

        /* If this is a live stream with more than 3 segments, start at the
         * third last segment. */
        v->cur_seq_no = v->start_seq_no;
        if (!v->finished && v->n_segments > 3)
            v->cur_seq_no = v->start_seq_no + v->n_segments - 3;

        v->read_buffer = av_malloc(INITIAL_BUFFER_SIZE);
        if (!v->read_buffer) {
            /* Previously unchecked: a failed allocation would have been
             * dereferenced by ffio_init_context(). */
            ret = AVERROR(ENOMEM);
            avformat_free_context(v->ctx);
            v->ctx = NULL;
            goto fail;
        }
        ffio_init_context(&v->pb, v->read_buffer, INITIAL_BUFFER_SIZE, 0, v,
                          read_data, NULL, NULL);
        v->pb.seekable = 1;
        ret = av_probe_input_buffer(&v->pb, &in_fmt, v->segments[0]->url,
                                    NULL, 0, 0);
        if (ret < 0) {
            /* Free the ctx - it isn't initialized properly at this point,
             * so avformat_close_input shouldn't be called. If
             * avformat_open_input fails below, it frees and zeros the
             * context, so it doesn't need any special treatment like this. */
            avformat_free_context(v->ctx);
            v->ctx = NULL;
            goto fail;
        }
        v->ctx->pb = &v->pb;
        ret = avformat_open_input(&v->ctx, v->segments[0]->url, in_fmt, NULL);
        if (ret < 0)
            goto fail;

        v->stream_offset = stream_offset;
        snprintf(bitrate_str, sizeof(bitrate_str), "%d", v->bandwidth);
        /* Create new AVStreams for each stream in this variant */
        for (j = 0; j < v->ctx->nb_streams; j++) {
            AVStream *st = avformat_new_stream(s, NULL);
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            st->id = i;
            avcodec_copy_context(st->codec, v->ctx->streams[j]->codec);
            if (v->bandwidth)
                av_dict_set(&st->metadata, "variant_bitrate", bitrate_str, 0);
        }
        stream_offset += v->ctx->nb_streams;
    }

    c->first_packet = 1;
    c->first_timestamp = AV_NOPTS_VALUE;
    c->seek_timestamp  = AV_NOPTS_VALUE;

    return 0;
fail:
    free_variant_list(c);
    return ret;
}
/*
 * Parse a filter argument string of the form "key=value:key=value:..."
 * (with positional shorthand for leading values) and apply each option.
 *
 * Options recognized by the filter context itself are set via av_opt_set();
 * everything else is recorded in *options and additionally tried on the
 * filter's private context so unknown keys can be diagnosed.
 *
 * Returns the number of options processed, or a negative AVERROR on a parse
 * or set failure.
 */
static int process_options(AVFilterContext *ctx, AVDictionary **options,
                           const char *args)
{
    const AVOption *o = NULL;
    int ret, count = 0;
    char *av_uninit(parsed_key), *av_uninit(value);
    const char *key;
    /* Tracks the offset of the last shorthand option consumed so that
     * aliases sharing a storage offset are skipped. */
    int offset= -1;

    if (!args)
        return 0;

    while (*args) {
        const char *shorthand = NULL;

        /* Advance to the next candidate shorthand (positional) option,
         * skipping constants and aliases of the previous option. */
        o = av_opt_next(ctx->priv, o);
        if (o) {
            if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
                continue;
            offset = o->offset;
            shorthand = o->name;
        }

        ret = av_opt_get_key_value(&args, "=", ":",
                                   shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
                                   &parsed_key, &value);
        if (ret < 0) {
            if (ret == AVERROR(EINVAL))
                av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
            else
                av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
                       av_err2str(ret));
            return ret;
        }
        /* Step over the separator character, if any. */
        if (*args)
            args++;
        if (parsed_key) {
            /* An explicit "key=" was given: positional matching ends here. */
            key = parsed_key;
            while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
        } else {
            key = shorthand;
        }

        av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);

        if (av_opt_find(ctx, key, NULL, 0, 0)) {
            /* Option belongs to the filter context itself. */
            ret = av_opt_set(ctx, key, value, 0);
            if (ret < 0) {
                av_free(value);
                av_free(parsed_key);
                return ret;
            }
        } else {
            /* Not a context option: stash it in the caller's dictionary and
             * also try the private context so unknown keys are reported. */
            av_dict_set(options, key, value, 0);
            if ((ret = av_opt_set(ctx->priv, key, value, 0)) < 0) {
                if (!av_opt_find(ctx->priv, key, NULL, 0,
                                 AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
                    if (ret == AVERROR_OPTION_NOT_FOUND)
                        av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
                    av_free(value);
                    av_free(parsed_key);
                    return ret;
                }
            }
        }

        av_free(value);
        av_free(parsed_key);
        count++;
    }

    /* Apply the timeline "enable" expression, if one was configured. */
    if (ctx->enable_str) {
        ret = set_enable_expr(ctx, ctx->enable_str);
        if (ret < 0)
            return ret;
    }
    return count;
}
/*
 * Demuxer thread: opens the input, selects the first audio and video
 * streams, then loops reading packets and dispatching them to the audio and
 * video packet queues until quit/EOF.
 *
 * `arg` is the player's VideoState.  Returns 0 on normal termination or
 * when no codec could be opened (an error is notified), -1 on open errors.
 */
int decode_thread(void *arg) {
    VideoState *is = (VideoState *)arg;
    AVPacket pkt1, *packet = &pkt1;
    AVDictionary *io_dict = NULL;
    AVIOInterruptCB callback;
    int video_index = -1;
    int audio_index = -1;
    int i;
    int ret;
    int eof = 0;

    is->videoStream=-1;
    is->audioStream=-1;

    /* Demuxer options: request ICY metadata and set a client identity. */
    AVDictionary *options = NULL;
    av_dict_set(&options, "icy", "1", 0);
    av_dict_set(&options, "user-agent", "FFmpegMediaPlayer", 0);

    if (is->headers) {
        av_dict_set(&options, "headers", is->headers, 0);
    }

    /* When resuming from an offset, pre-allocate the context so the byte
     * skip takes effect before avformat_open_input(). */
    if (is->offset > 0) {
        is->pFormatCtx = avformat_alloc_context();
        is->pFormatCtx->skip_initial_bytes = is->offset;
        //is->pFormatCtx->iformat = av_find_input_format("mp3");
    }

    // will interrupt blocking functions if we quit!
    callback.callback = decode_interrupt_cb;
    callback.opaque = is;
    if (avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict)) {
        fprintf(stderr, "Unable to open I/O for %s\n", is->filename);
        return -1;
    }

    // Open video file
    if(avformat_open_input(&is->pFormatCtx, is->filename, NULL, &options)!=0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if(avformat_find_stream_info(is->pFormatCtx, NULL)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(is->pFormatCtx, 0, is->filename, 0);

    // Find the first video stream
    for(i=0; i<is->pFormatCtx->nb_streams; i++) {
        if(is->pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
           video_index < 0) {
            video_index=i;
        }
        if(is->pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
           audio_index < 0) {
            audio_index=i;
        }

        set_codec(is->pFormatCtx, i);
    }
    if(audio_index >= 0) {
        stream_component_open(is, audio_index);
    }
    if(video_index >= 0) {
        stream_component_open(is, video_index);
    }

    /* Playback requires at least one of the two streams. */
    if(is->videoStream < 0 && is->audioStream < 0) {
        //if(is->videoStream < 0 || is->audioStream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        notify(is, MEDIA_ERROR, 0, 0);
        return 0;
    }

    /* Publish stream properties to listeners before decoding starts. */
    set_rotation(is->pFormatCtx, is->audio_st, is->video_st);
    set_framerate(is->pFormatCtx, is->audio_st, is->video_st);
    set_filesize(is->pFormatCtx);
    set_chapter_count(is->pFormatCtx);

    // main decode loop
    for(;;) {
        if(is->quit) {
            break;
        }
        /*if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return = av_read_pause(is->pFormatCtx);
            else
                av_read_play(is->pFormatCtx);
        }*/
        // seek stuff goes here
        if(is->seek_req) {
            int64_t seek_target = is->seek_pos;
            /* Bias the window by 2 units to keep the target inside it. */
            int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;

            int ret = avformat_seek_file(is->pFormatCtx, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if(ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->pFormatCtx->filename);
            } else {
                /* Drop stale packets and inject the flush sentinel so the
                 * decoders reset their state. */
                if(is->audioStream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(is, &is->audioq, &is->flush_pkt);
                }
                if(is->videoStream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(is, &is->videoq, &is->flush_pkt);
                }
                notify(is, MEDIA_SEEK_COMPLETE, 0, 0);
            }
            is->seek_req = 0;
            eof = 0;
        }

        /* First time the audio queue fills up, report the player prepared. */
        if (is->audioq.size >= MAX_AUDIOQ_SIZE && !is->prepared) {
            queueAudioSamples(&is->audio_player, is);
            notify(is, MEDIA_PREPARED, 0, 0);
            is->prepared = 1;
        }

        /* Throttle demuxing when either queue is full. */
        if(is->audioq.size > MAX_AUDIOQ_SIZE ||
           is->videoq.size > MAX_VIDEOQ_SIZE) {
            SDL_Delay(10);
            continue;
        }
        if((ret = av_read_frame(is->pFormatCtx, packet)) < 0) {
            /* NOTE(review): `!is->pFormatCtx->pb->eof_reached` looks
             * inverted — it flags EOF when the I/O layer has NOT reached
             * end-of-file.  Confirm against the upstream tutorial code,
             * which tests `url_feof(pb)` without negation. */
            if (ret == AVERROR_EOF || !is->pFormatCtx->pb->eof_reached) {
                eof = 1;
                break;
            }
            if(is->pFormatCtx->pb->error == 0) {
                SDL_Delay(100); /* no error; wait for user input */
                continue;
            } else {
                break;
            }
        }
        // Is this a packet from the video stream?
        if(packet->stream_index == is->videoStream) {
            packet_queue_put(is, &is->videoq, packet);
        } else if(packet->stream_index == is->audioStream) {
            packet_queue_put(is, &is->audioq, packet);
        } else {
            /* Packet from a stream we don't play: discard it. */
            av_free_packet(packet);
        }

        if (eof) {
            break;
        }
    }

    if (eof) {
        notify(is, MEDIA_PLAYBACK_COMPLETE, 0, 0);
    }

    return 0;
}
// open video capture device
//
// Resolves the platform capture input format (VFW on Windows; V4L/DV1394 on
// Linux, with optional "driver:standard" syntax in `file`), builds the
// device options (standard, framerate, capture size), opens the stream and
// enables non-blocking reads.  Returns silently on any failure.
void VideoFFmpeg::openCam (char *file, short camIdx)
{
	// open camera source
	AVInputFormat		*inputFormat;
	AVDictionary		*formatParams = NULL;
	char				filename[28], rateStr[20];

#ifdef WIN32
	// video capture on windows only through Video For Windows driver
	inputFormat = av_find_input_format("vfwcap");
	if (!inputFormat)
		// Video For Windows not supported??
		return;
	sprintf(filename, "%d", camIdx);
#else
	// In Linux we support two types of devices: VideoForLinux and DV1394.
	// the user specify it with the filename:
	// [<device_type>][:<standard>]
	// <device_type> : 'v4l' for VideoForLinux, 'dv1394' for DV1394. By default 'v4l'
	// <standard>    : 'pal', 'secam' or 'ntsc'. By default 'ntsc'
	// The driver name is constructed automatically from the device type:
	// v4l   : /dev/video<camIdx>
	// dv1394: /dev/dv1394/<camIdx>
	// If you have different driver name, you can specify the driver name explicitly
	// instead of device type. Examples of valid filename:
	//    /dev/v4l/video0:pal
	//    /dev/ieee1394/1:ntsc
	//    dv1394:secam
	//    v4l:pal
	char *p;
	if (file && strstr(file, "1394") != NULL)
	{
		// the user specifies a driver, check if it is v4l or d41394
		inputFormat = av_find_input_format("dv1394");
		sprintf(filename, "/dev/dv1394/%d", camIdx);
	}
	else
	{
		// Try the V4L input formats from newest to oldest name.
		const char *formats[] = {"video4linux2,v4l2", "video4linux2", "video4linux"};
		int i, formatsCount = sizeof(formats) / sizeof(char*);
		for (i = 0; i < formatsCount; i++) {
			inputFormat = av_find_input_format(formats[i]);
			if (inputFormat)
				break;
		}
		sprintf(filename, "/dev/video%d", camIdx);
	}
	if (!inputFormat)
		// these format should be supported, check ffmpeg compilation
		return;
	if (file && strncmp(file, "/dev", 4) == 0)
	{
		// user does not specify a driver
		strncpy(filename, file, sizeof(filename));
		filename[sizeof(filename)-1] = 0;
		// Strip a trailing ":standard" suffix from the device path.
		if ((p = strchr(filename, ':')) != 0)
			*p = 0;
	}
	// The text after ':' selects the video standard (pal/secam/ntsc).
	if (file && (p = strchr(file, ':')) != NULL) {
		av_dict_set(&formatParams, "standard", p+1, 0);
	}
#endif
	//frame rate
	if (m_captRate <= 0.f)
		m_captRate = defFrameRate;
	sprintf(rateStr, "%f", m_captRate);

	av_dict_set(&formatParams, "framerate", rateStr, 0);

	if (m_captWidth > 0 && m_captHeight > 0) {
		char video_size[64];
		BLI_snprintf(video_size, sizeof(video_size), "%dx%d", m_captWidth, m_captHeight);
		av_dict_set(&formatParams, "video_size", video_size, 0);
	}

	// NOTE(review): if openStream() fails, `formatParams` is not freed here;
	// confirm whether openStream() takes ownership of the dictionary.
	if (openStream(filename, inputFormat, &formatParams) != 0)
		return;

	// for video capture it is important to do non blocking read
	m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK;
	// open base class
	VideoBase::openCam(file, camIdx);
	// check if we should do multi-threading?
	if (BLI_system_thread_count() > 1)
	{
		// no need to thread if the system has a single core
		m_isThreaded = true;
	}
	av_dict_free(&formatParams);
}
/*
 * Demuxer read_header callback for the SoX native (.sox) format.
 *
 * Detects byte order from the magic tag (little-endian SOX_TAG vs.
 * big-endian), reads header size, sample rate (stored as an IEEE double),
 * channel count and comment size, validates them, attaches the optional
 * comment as "comment" metadata, and fills in the PCM S32 codec parameters.
 *
 * Returns 0 on success or a negative AVERROR on malformed input / OOM.
 */
static int sox_read_header(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    unsigned header_size, comment_size;
    double sample_rate, sample_rate_frac;
    AVStream *st;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;

    /* The magic tag decides endianness of the whole header and payload. */
    if (avio_rl32(pb) == SOX_TAG) {
        st->codecpar->codec_id = AV_CODEC_ID_PCM_S32LE;
        header_size = avio_rl32(pb);
        avio_skip(pb, 8); /* sample count */
        sample_rate = av_int2double(avio_rl64(pb));
        st->codecpar->channels = avio_rl32(pb);
        comment_size = avio_rl32(pb);
    } else {
        st->codecpar->codec_id = AV_CODEC_ID_PCM_S32BE;
        header_size = avio_rb32(pb);
        avio_skip(pb, 8); /* sample count */
        sample_rate = av_int2double(avio_rb64(pb));
        st->codecpar->channels = avio_rb32(pb);
        comment_size = avio_rb32(pb);
    }

    /* Reject comment sizes that would overflow the header arithmetic. */
    if (comment_size > 0xFFFFFFFFU - SOX_FIXED_HDR - 4U) {
        av_log(s, AV_LOG_ERROR, "invalid comment size (%u)\n", comment_size);
        return AVERROR_INVALIDDATA;
    }

    if (sample_rate <= 0 || sample_rate > INT_MAX) {
        av_log(s, AV_LOG_ERROR, "invalid sample rate (%f)\n", sample_rate);
        return AVERROR_INVALIDDATA;
    }

    /* The container stores the rate as a double; we only keep the integer
     * part, warning when precision is lost. */
    sample_rate_frac = sample_rate - floor(sample_rate);
    if (sample_rate_frac)
        av_log(s, AV_LOG_WARNING,
               "truncating fractional part of sample rate (%f)\n",
               sample_rate_frac);

    /* Header must be 8-byte aligned (+4 for the magic) and large enough to
     * hold the fixed part plus the comment. */
    if ((header_size + 4) & 7 || header_size < SOX_FIXED_HDR + comment_size ||
        st->codecpar->channels > 65535) /* Reserve top 16 bits */ {
        av_log(s, AV_LOG_ERROR, "invalid header\n");
        return AVERROR_INVALIDDATA;
    }

    if (comment_size && comment_size < UINT_MAX) {
        char *comment = av_malloc(comment_size+1);
        if(!comment)
            return AVERROR(ENOMEM);
        if (avio_read(pb, comment, comment_size) != comment_size) {
            av_freep(&comment);
            return AVERROR(EIO);
        }
        comment[comment_size] = 0;

        /* Dictionary takes ownership of the buffer (DONT_STRDUP_VAL). */
        av_dict_set(&s->metadata, "comment", comment,
                    AV_DICT_DONT_STRDUP_VAL);
    }

    /* Skip any padding between the comment and the sample data. */
    avio_skip(pb, header_size - SOX_FIXED_HDR - comment_size);

    st->codecpar->sample_rate = sample_rate;
    st->codecpar->bits_per_coded_sample = 32;
    st->codecpar->bit_rate = (int64_t)st->codecpar->sample_rate *
                             st->codecpar->bits_per_coded_sample *
                             st->codecpar->channels;
    st->codecpar->block_align = st->codecpar->bits_per_coded_sample *
                                st->codecpar->channels / 8;

    avpriv_set_pts_info(st, 64, 1, st->codecpar->sample_rate);

    return 0;
}
/**
 * Open the media file behind @p track for streaming.
 *
 * Opens the container, validates that the first stream is a decodable audio
 * stream with a known duration, resolves the decoder, records source codec
 * type and format into @p stream, loads replay-gain metadata (container
 * first, then stream level), and seeks to the track's relative start.
 *
 * Returns true on success; on failure, all libav resources are released and
 * @p stream is left untouched except for fields set before the failure.
 *
 * Fixes over the previous version:
 *  - the options dictionary was never freed, leaking any entries the
 *    demuxer did not consume on every call;
 *  - the replay-gain fallback condition compared replay_track_gain with
 *    itself twice (copy-paste bug) instead of also checking
 *    replay_album_gain.
 */
bool stream_open(stream_t *stream, track_t* track)
{
  int result;
  AVFormatContext *ctx = NULL;
  AVCodec *src_codec;
  AVDictionary *opts = NULL;
  char tmp[20];

  if (!track) {
    return false;
  }

  /* Pass the intra-file track index to the demuxer as an option. */
  snprintf(tmp, sizeof(tmp), "%" PRId64, track->trackindex);
  av_dict_set(&opts, "track_index", tmp, 0);

  result = avformat_open_input(&ctx, track->file, NULL, &opts);
  /* Entries not consumed by the demuxer remain in the dictionary and are
   * our responsibility to free (previously leaked). */
  av_dict_free(&opts);
  if (result < 0) {
    musicd_log(LOG_ERROR, "stream", "can't open file '%s': %s",
               track->file, strerror(AVUNERROR(result)));
    return false;
  }

  result = avformat_find_stream_info(ctx, NULL);
  if (result < 0) {
    musicd_log(LOG_ERROR, "stream", "can't find stream info of '%s': %s",
               track->file, strerror(AVUNERROR(result)));
    avformat_close_input(&ctx);
    return false;
  }

  /* Only accept files whose first stream is decodable audio with a usable
   * duration (either container-level or stream-level). */
  if (ctx->nb_streams < 1
   || ctx->streams[0]->codec->codec_type != AVMEDIA_TYPE_AUDIO
   || ctx->streams[0]->codec->codec_id == AV_CODEC_ID_NONE
   || (ctx->duration < 1 && ctx->streams[0]->duration < 1)) {
    avformat_close_input(&ctx);
    return false;
  }

  src_codec = avcodec_find_decoder(ctx->streams[0]->codec->codec_id);
  if (!src_codec) {
    musicd_log(LOG_ERROR, "stream", "decoder not found");
    avformat_close_input(&ctx);
    return false;
  }

  stream->track = track;
  stream->src_ctx = ctx;
  stream->src_codec = src_codec;

  if (stream->src_codec->id == AV_CODEC_ID_MP3) {
    stream->src_codec_type = CODEC_TYPE_MP3;
  } else if (stream->src_codec->id == AV_CODEC_ID_VORBIS) {
    stream->src_codec_type = CODEC_TYPE_OGG_VORBIS;
  } else {
    stream->src_codec_type = CODEC_TYPE_OTHER;
  }

  format_from_av(stream->src_ctx->streams[0]->codec, &stream->format);

  /* Replay gain: test container metadata, then stream metadata. */
  find_replay_gain(stream, stream->src_ctx->metadata);
  /* Fall back to stream-level metadata only when neither gain was found.
   * (The original tested replay_track_gain twice.) */
  if (stream->replay_track_gain == 0.0 && stream->replay_album_gain == 0.0) {
    find_replay_gain(stream, stream->src_ctx->streams[0]->metadata);
  }
  musicd_log(LOG_DEBUG, "stream", "replaygain: %f %f %f %f",
             stream->replay_track_gain, stream->replay_album_gain,
             stream->replay_track_peak, stream->replay_album_peak);

  /* If the track doesn't begin from the beginning of the file, seek to relative
   * zero. */
  if (stream->track->start > 0) {
    stream_seek(stream, 0);
  }

  return true;
}
/*
 * Read one APEv2 tag field from the input.
 *
 * The key is a run of printable ASCII terminated by NUL.  Binary fields
 * (APE_TAG_FLAG_IS_BINARY) start with a filename: recognized image names
 * become an attached-picture stream, anything else becomes an attachment
 * stream with the payload in extradata.  Text fields go into the format
 * context's metadata dictionary.
 *
 * Returns 0 on success (including skipped fields), -1 for an invalid key,
 * or a negative AVERROR on read/allocation failure.
 */
static int ape_tag_read_field(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    uint8_t key[1024], *value;
    int64_t size, flags;
    int i, c;

    size = avio_rl32(pb);  /* field size */
    flags = avio_rl32(pb); /* field flags */
    /* Keys are printable ASCII only; stop at the first other byte. */
    for (i = 0; i < sizeof(key) - 1; i++) {
        c = avio_r8(pb);
        if (c < 0x20 || c > 0x7E)
            break;
        else
            key[i] = c;
    }
    key[i] = 0;
    /* A valid key is terminated by a NUL byte, which ended the loop. */
    if (c != 0) {
        av_log(s, AV_LOG_WARNING, "Invalid APE tag key '%s'.\n", key);
        return -1;
    }
    /* Keep size + padding within int32 range for the allocations below. */
    if (size > INT32_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
        av_log(s, AV_LOG_ERROR, "APE tag size too large.\n");
        return AVERROR_INVALIDDATA;
    }
    if (flags & APE_TAG_FLAG_IS_BINARY) {
        uint8_t filename[1024];
        enum AVCodecID id;
        AVStream *st = avformat_new_stream(s, NULL);
        if (!st)
            return AVERROR(ENOMEM);

        /* Binary payload is prefixed with a NUL-terminated filename. */
        size -= avio_get_str(pb, size, filename, sizeof(filename));
        if (size <= 0) {
            av_log(s, AV_LOG_WARNING, "Skipping binary tag '%s'.\n", key);
            return 0;
        }

        av_dict_set(&st->metadata, key, filename, 0);

        if ((id = ff_guess_image2_codec(filename)) != AV_CODEC_ID_NONE) {
            /* Looks like cover art: expose it as an attached picture. */
            AVPacket pkt;
            int ret;

            ret = av_get_packet(s->pb, &pkt, size);
            if (ret < 0) {
                av_log(s, AV_LOG_ERROR, "Error reading cover art.\n");
                return ret;
            }

            st->disposition |= AV_DISPOSITION_ATTACHED_PIC;
            st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
            st->codecpar->codec_id = id;

            st->attached_pic = pkt;
            st->attached_pic.stream_index = st->index;
            st->attached_pic.flags |= AV_PKT_FLAG_KEY;
        } else {
            /* Unknown binary blob: keep it as an attachment stream. */
            st->codecpar->extradata = av_malloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codecpar->extradata)
                return AVERROR(ENOMEM);
            if (avio_read(pb, st->codecpar->extradata, size) != size) {
                av_freep(&st->codecpar->extradata);
                return AVERROR(EIO);
            }
            st->codecpar->extradata_size = size;
            st->codecpar->codec_type = AVMEDIA_TYPE_ATTACHMENT;
        }
    } else {
        /* Text field: NUL-terminate and hand ownership of the buffer to
         * the metadata dictionary (DONT_STRDUP_VAL). */
        value = av_malloc(size+1);
        if (!value)
            return AVERROR(ENOMEM);
        c = avio_read(pb, value, size);
        if (c < 0) {
            av_free(value);
            return c;
        }
        value[c] = 0;
        av_dict_set(&s->metadata, key, value, AV_DICT_DONT_STRDUP_VAL);
    }
    return 0;
}
/*
 * Configure the codec context of a freshly created video AVStream before
 * the encoder is opened: validates geometry and frame rate, picks the
 * first pixel format the codec supports (setting BT.709/MPEG-range color
 * properties for YUV formats), and applies the user's codec options,
 * forwarding unrecognized keys to the libav options dictionary.
 *
 * Throws LibavException on invalid parameters or when the encoder supports
 * none of our pixel formats.
 */
void VideoEncoder::PrepareStream(AVStream* stream, AVCodec* codec, AVDictionary** options, const std::vector<std::pair<QString, QString> >& codec_options,
								 unsigned int bit_rate, unsigned int width, unsigned int height, unsigned int frame_rate) {

	if(width == 0 || height == 0) {
		Logger::LogError("[VideoEncoder::PrepareStream] " + Logger::tr("Error: Width or height is zero!"));
		throw LibavException();
	}
	if(width > 10000 || height > 10000) {
		Logger::LogError("[VideoEncoder::PrepareStream] " + Logger::tr("Error: Width or height is too large, the maximum width and height is %1!").arg(10000));
		throw LibavException();
	}
	// Most encoders (and YUV 4:2:0 subsampling) require even dimensions.
	if(width % 2 != 0 || height % 2 != 0) {
		Logger::LogError("[VideoEncoder::PrepareStream] " + Logger::tr("Error: Width or height is not an even number!"));
		throw LibavException();
	}
	if(frame_rate == 0) {
		Logger::LogError("[VideoEncoder::PrepareStream] " + Logger::tr("Error: Frame rate is zero!"));
		throw LibavException();
	}

	stream->codec->bit_rate = bit_rate;
	stream->codec->width = width;
	stream->codec->height = height;
	// Time base is one tick per frame.
	stream->codec->time_base.num = 1;
	stream->codec->time_base.den = frame_rate;
#if SSR_USE_AVSTREAM_TIME_BASE
	stream->time_base = stream->codec->time_base;
#endif
	// Pick the first pixel format from our preference list that the
	// encoder accepts.
	stream->codec->pix_fmt = AV_PIX_FMT_NONE;
	for(unsigned int i = 0; i < SUPPORTED_PIXEL_FORMATS.size(); ++i) {
		if(AVCodecSupportsPixelFormat(codec, SUPPORTED_PIXEL_FORMATS[i].m_format)) {
			stream->codec->pix_fmt = SUPPORTED_PIXEL_FORMATS[i].m_format;
			if(SUPPORTED_PIXEL_FORMATS[i].m_is_yuv) {
				// Tag YUV output as BT.709 with limited (MPEG) range.
				stream->codec->color_primaries = AVCOL_PRI_BT709;
				stream->codec->color_trc = AVCOL_TRC_BT709;
				stream->codec->colorspace = AVCOL_SPC_BT709;
				stream->codec->color_range = AVCOL_RANGE_MPEG;
				stream->codec->chroma_sample_location = AVCHROMA_LOC_CENTER;
			} else {
				stream->codec->colorspace = AVCOL_SPC_RGB;
			}
			break;
		}
	}
	if(stream->codec->pix_fmt == AV_PIX_FMT_NONE) {
		Logger::LogError("[VideoEncoder::PrepareStream] " + Logger::tr("Error: Encoder requires an unsupported pixel format!"));
		throw LibavException();
	}
	// Square pixels; aspect correction is handled by the frame size.
	stream->codec->sample_aspect_ratio.num = 1;
	stream->codec->sample_aspect_ratio.den = 1;
	stream->sample_aspect_ratio = stream->codec->sample_aspect_ratio;
	// Default to one encoder thread per hardware thread.
	stream->codec->thread_count = std::max(1, (int) std::thread::hardware_concurrency());

	// Apply user-supplied codec options; keys we don't interpret ourselves
	// are passed through to libav via the options dictionary.
	for(unsigned int i = 0; i < codec_options.size(); ++i) {
		const QString &key = codec_options[i].first, &value = codec_options[i].second;
		if(key == "threads") {
			stream->codec->thread_count = ParseCodecOptionInt(key, value, 1, 100);
		} else if(key == "qscale") {
			stream->codec->flags |= CODEC_FLAG_QSCALE;
			stream->codec->global_quality = lrint(ParseCodecOptionDouble(key, value, -1.0e6, 1.0e6, FF_QP2LAMBDA));
		} else if(key == "minrate") {
			stream->codec->rc_min_rate = ParseCodecOptionInt(key, value, 1, 1000000, 1024); // kbps
		} else if(key == "maxrate") {
			stream->codec->rc_max_rate = ParseCodecOptionInt(key, value, 1, 1000000, 1024); // kbps
		} else if(key == "bufsize") {
			stream->codec->rc_buffer_size = ParseCodecOptionInt(key, value, 1, 1000000, 1024); // kbps
		} else if(key == "keyint") {
			stream->codec->gop_size = ParseCodecOptionInt(key, value, 1, 1000000);
#if !SSR_USE_AVCODEC_PRIVATE_PRESET
		} else if(key == "crf") {
			// Old libav without private presets: set CRF directly.
			stream->codec->crf = ParseCodecOptionInt(key, value, 0, 51);
#endif
#if !SSR_USE_AVCODEC_PRIVATE_PRESET
		} else if(key == "preset") {
			X264Preset(stream->codec, value.toUtf8().constData());
#endif
		} else {
			av_dict_set(options, key.toUtf8().constData(), value.toUtf8().constData(), 0);
		}
	}
}
int encode_lavc_start(struct encode_lavc_context *ctx) { AVDictionaryEntry *de; if (ctx->header_written < 0) return 0; if (ctx->header_written > 0) return 1; CHECK_FAIL(ctx, 0); if (ctx->expect_video && ctx->vcc == NULL) { if (ctx->avc->oformat->video_codec != AV_CODEC_ID_NONE || ctx->options->vcodec) { encode_lavc_fail(ctx, "no video stream succeeded - invalid codec?\n"); return 0; } } if (ctx->expect_audio && ctx->acc == NULL) { if (ctx->avc->oformat->audio_codec != AV_CODEC_ID_NONE || ctx->options->acodec) { encode_lavc_fail(ctx, "no audio stream succeeded - invalid codec?\n"); return 0; } } ctx->header_written = -1; if (!(ctx->avc->oformat->flags & AVFMT_NOFILE)) { MP_INFO(ctx, "Opening output file: %s\n", ctx->avc->filename); if (avio_open(&ctx->avc->pb, ctx->avc->filename, AVIO_FLAG_WRITE) < 0) { encode_lavc_fail(ctx, "could not open '%s'\n", ctx->avc->filename); return 0; } } ctx->t0 = mp_time_sec(); MP_INFO(ctx, "Opening muxer: %s [%s]\n", ctx->avc->oformat->long_name, ctx->avc->oformat->name); if (ctx->metadata) { for (int i = 0; i < ctx->metadata->num_keys; i++) { av_dict_set(&ctx->avc->metadata, ctx->metadata->keys[i], ctx->metadata->values[i], 0); } } if (avformat_write_header(ctx->avc, &ctx->foptions) < 0) { encode_lavc_fail(ctx, "could not write header\n"); return 0; } for (de = NULL; (de = av_dict_get(ctx->foptions, "", de, AV_DICT_IGNORE_SUFFIX));) MP_WARN(ctx, "ofopts: key '%s' not found.\n", de->key); av_dict_free(&ctx->foptions); ctx->header_written = 1; return 1; }
int main(int argc, char* argv[]) { avcodec_register_all(); av_register_all(); av_log_set_level(AV_LOG_DEBUG); AVFormatContext *pFormatCtx = avformat_alloc_context(); AVCodecContext *pCodecCtx; AVCodec *pCodec; AVFrame *pFrame, *pFrameRGB; AVPicture pict; struct SwsContext *img_convert_ctx; uint8_t *buffer; int videoStream = -1; int numBytes; int ret = 0; fprintf(stdout, "%s\n", argv[1]); AVInputFormat *fileFormat; fileFormat = av_find_input_format("ass"); if (!fileFormat) { av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n"); return AVERROR(EINVAL); } char *charenc = NULL; if (argv[2] != NULL) { charenc = av_malloc(strlen (argv[2])); memset(charenc, 0, 10); memcpy(charenc, argv[2], strlen(argv[2])); printf("charenc :%p %s\n", charenc, charenc); } AVDictionary *codec_opts = NULL; if (charenc) { av_dict_set(&codec_opts, "sub_charenc", charenc, 0); } if ((ret = avformat_open_input(&pFormatCtx, argv[1], fileFormat, &codec_opts)) != 0) { fprintf(stderr, "av_open_input_file \n"); return -1; } fprintf(stdout, "pFormatCtx %p\n", pFormatCtx); if (avformat_find_stream_info(pFormatCtx, NULL) < 0) { fprintf(stderr, "av_find_stream_info \n"); return -1; } av_dump_format(pFormatCtx, 0, argv[1], 0); int i; for (i = 0; i < pFormatCtx->nb_streams; ++i) { if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) { videoStream = i; break ; } } if (videoStream == -1) { fprintf(stderr, "Unsupported videoStream.\n"); return -1; } fprintf(stdout, "videoStream: %d\n", videoStream); pCodecCtx = pFormatCtx->streams[videoStream]->codec; pCodecCtx->debug = 1; pCodec = avcodec_find_decoder(pCodecCtx->codec_id); if (pCodec == NULL) { fprintf(stderr, "Unsupported codec.\n"); return -1; } //pCodecCtx->sub_charenc = charenc;//"UTF-16LE"; if (avcodec_open2(pCodecCtx, pCodec, &codec_opts) < 0) { fprintf(stderr, "Could not open codec.\n"); return -1; } printf("pCodecCtx->sub_charenc :%s\n", pCodecCtx->sub_charenc); int index; AVPacket packet; av_init_packet(&packet); AVSubtitle 
sub; int len1, gotSubtitle; for (index = 0; ; index++) { //memset(&packet, 0, sizeof (packet)); ret = av_read_frame(pFormatCtx, &packet); if (ret < 0) { fprintf(stderr, "read frame ret:%s\n", ret_str(ret)); break ; } fprintf(stdout, "stream_index:%d\n", packet.stream_index); if (packet.stream_index == videoStream) { ret = avcodec_decode_subtitle2(pCodecCtx, &sub, &gotSubtitle, &packet); if (index > 30) { break ; } fprintf(stdout, "gotSubtitle:%d\n", gotSubtitle); dumpSubtitle(&sub); } av_free_packet(&packet); } end: fprintf(stderr, "end return.\n"); avcodec_close(pCodecCtx); avformat_close_input(&pFormatCtx); return 0; }
/*
 * Set up a libavformat "spdif" muxer that wraps compressed audio frames into
 * IEC 61937 bursts for digital passthrough. Muxed output goes to the
 * write_packet callback via a custom AVIOContext rather than a file.
 * Returns 1 on success, 0 on failure (uninit() runs on the error path).
 */
static int init(struct dec_audio *da, const char *decoder)
{
    struct spdifContext *spdif_ctx = talloc_zero(NULL, struct spdifContext);
    da->priv = spdif_ctx;
    spdif_ctx->log = da->log;

    AVFormatContext *lavf_ctx = avformat_alloc_context();
    if (!lavf_ctx)
        goto fail;

    lavf_ctx->oformat = av_guess_format("spdif", NULL, NULL);
    if (!lavf_ctx->oformat)
        goto fail;

    spdif_ctx->lavf_ctx = lavf_ctx;

    /* Custom I/O: the spdif muxer writes into write_packet (opaque =
     * spdif_ctx) instead of a file. */
    void *buffer = av_mallocz(OUTBUF_SIZE);
    if (!buffer)
        abort();
    lavf_ctx->pb = avio_alloc_context(buffer, OUTBUF_SIZE, 1, spdif_ctx, NULL,
                                      write_packet, NULL);
    if (!lavf_ctx->pb) {
        av_free(buffer);
        goto fail;
    }

    // Request minimal buffering (not available on Libav)
#if LIBAVFORMAT_VERSION_MICRO >= 100
    lavf_ctx->pb->direct = 1;
#endif

    AVStream *stream = avformat_new_stream(lavf_ctx, 0);
    if (!stream)
        goto fail;

    stream->codec->codec_id = mp_codec_to_av_codec_id(decoder);

    AVDictionary *format_opts = NULL;

    /* Per-codec IEC 61937 framing: burst size, and the sample format/rate/
     * channel count presented to the audio output for the passthrough link. */
    int num_channels = 0;
    int sample_format = 0;
    int samplerate = 0;
    switch (stream->codec->codec_id) {
    case AV_CODEC_ID_AAC:
        spdif_ctx->iec61937_packet_size = 16384;
        sample_format                   = AF_FORMAT_IEC61937_LE;
        samplerate                      = 48000;
        num_channels                    = 2;
        break;
    case AV_CODEC_ID_AC3:
        spdif_ctx->iec61937_packet_size = 6144;
        sample_format                   = AF_FORMAT_AC3_LE;
        samplerate                      = 48000;
        num_channels                    = 2;
        break;
    case AV_CODEC_ID_DTS:
        if (da->opts->dtshd) {
            /* DTS-HD MA needs a high-bandwidth (8-channel, 192 kHz) link */
            av_dict_set(&format_opts, "dtshd_rate", "768000", 0); // 4*192000
            spdif_ctx->iec61937_packet_size = 32768;
            sample_format                   = AF_FORMAT_IEC61937_LE;
            samplerate                      = 192000;
            num_channels                    = 2*4;
        } else {
            /* plain DTS core fits a normal stereo S/PDIF link */
            spdif_ctx->iec61937_packet_size = 32768;
            sample_format                   = AF_FORMAT_AC3_LE;
            samplerate                      = 48000;
            num_channels                    = 2;
        }
        break;
    case AV_CODEC_ID_EAC3:
        spdif_ctx->iec61937_packet_size = 24576;
        sample_format                   = AF_FORMAT_IEC61937_LE;
        samplerate                      = 192000;
        num_channels                    = 2;
        break;
    case AV_CODEC_ID_MP3:
        spdif_ctx->iec61937_packet_size = 4608;
        sample_format                   = AF_FORMAT_MPEG2;
        samplerate                      = 48000;
        num_channels                    = 2;
        break;
    case AV_CODEC_ID_TRUEHD:
        spdif_ctx->iec61937_packet_size = 61440;
        sample_format                   = AF_FORMAT_IEC61937_LE;
        samplerate                      = 192000;
        num_channels                    = 8;
        break;
    default:
        abort();    /* caller only passes codecs we advertise */
    }
    mp_audio_set_num_channels(&da->decoded, num_channels);
    mp_audio_set_format(&da->decoded, sample_format);
    da->decoded.rate = samplerate;

    if (avformat_write_header(lavf_ctx, &format_opts) < 0) {
        MP_FATAL(da, "libavformat spdif initialization failed.\n");
        av_dict_free(&format_opts);
        goto fail;
    }
    av_dict_free(&format_opts);

    spdif_ctx->need_close = true;

    return 1;

fail:
    uninit(da);
    return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *frame) { AVFilterContext *ctx = inlink->dst; AVFilterLink *outlink = ctx->outputs[0]; MetadataContext *s = ctx->priv; AVDictionary **metadata = avpriv_frame_get_metadatap(frame); AVDictionaryEntry *e; if (!*metadata) return ff_filter_frame(outlink, frame); e = av_dict_get(*metadata, !s->key ? "" : s->key, NULL, !s->key ? AV_DICT_IGNORE_SUFFIX: 0); switch (s->mode) { case METADATA_SELECT: if (!s->value && e && e->value) { return ff_filter_frame(outlink, frame); } else if (s->value && e && e->value && s->compare(s, e->value, s->value)) { return ff_filter_frame(outlink, frame); } break; case METADATA_ADD: if (e && e->value) { ; } else { av_dict_set(metadata, s->key, s->value, 0); } return ff_filter_frame(outlink, frame); break; case METADATA_MODIFY: if (e && e->value) { av_dict_set(metadata, s->key, s->value, 0); } return ff_filter_frame(outlink, frame); break; case METADATA_PRINT: if (!s->key && e) { s->print(ctx, "frame:%-4"PRId64" pts:%-7s pts_time:%-7s\n", inlink->frame_count, av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base)); s->print(ctx, "%s=%s\n", e->key, e->value); while ((e = av_dict_get(*metadata, "", e, AV_DICT_IGNORE_SUFFIX)) != NULL) { s->print(ctx, "%s=%s\n", e->key, e->value); } } else if (e && e->value && (!s->value || (e->value && s->compare(s, e->value, s->value)))) { s->print(ctx, "frame:%-4"PRId64" pts:%-7s pts_time:%-7s\n", inlink->frame_count, av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base)); s->print(ctx, "%s=%s\n", s->key, e->value); } return ff_filter_frame(outlink, frame); break; case METADATA_DELETE: if (!s->key) { av_dict_free(metadata); } else if (e && e->value && (!s->value || s->compare(s, e->value, s->value))) { av_dict_set(metadata, s->key, NULL, 0); } return ff_filter_frame(outlink, frame); break; default: av_assert0(0); }; av_frame_free(&frame); return 0; }
/*
 * Initialize the video encoder for recording.
 *
 * Picks the encoder (lossless libx264rgb by default), decides the input and
 * output pixel formats and whether libswscale is needed, then configures and
 * opens the codec context and allocates the conversion/output buffers.
 * Returns false on any setup failure.
 *
 * Fixes relative to the original: the "&params" operand of av_dict_set()
 * and avcodec_open2() had been mangled into a pilcrow character (mojibake),
 * which does not compile; avcodec_alloc_context3() is now NULL-checked.
 */
static bool ffemu_init_video(ffemu_t *handle)
{
   struct ff_config_param *params = &handle->config;
   struct ff_video_info *video = &handle->video;
   struct ffemu_params *param = &handle->params;

   AVCodec *codec = NULL;

   if (*params->vcodec)
      codec = avcodec_find_encoder_by_name(params->vcodec);
   else
   {
      // By default, lossless video.
      av_dict_set(&params->video_opts, "qp", "0", 0);
      codec = avcodec_find_encoder_by_name("libx264rgb");
   }

   if (!codec)
   {
      RARCH_ERR("[FFmpeg]: Cannot find vcodec %s.\n",
            *params->vcodec ? params->vcodec : "libx264rgb");
      return false;
   }

   video->encoder = codec;

   // Don't use swscaler unless format is not something "in-house" scaler supports.
   // libswscale doesn't scale RGB -> RGB correctly (goes via YUV first), and it's non-trivial to fix
   // upstream as it's heavily geared towards YUV.
   // If we're dealing with strange formats or YUV, just use libswscale.
   if (params->out_pix_fmt != PIX_FMT_NONE)
   {
      video->pix_fmt = params->out_pix_fmt;
      if (video->pix_fmt != PIX_FMT_BGR24 && video->pix_fmt != PIX_FMT_RGB32)
         video->use_sws = true;

      switch (video->pix_fmt)
      {
         case PIX_FMT_BGR24:
            video->scaler.out_fmt = SCALER_FMT_BGR24;
            break;

         case PIX_FMT_RGB32:
            video->scaler.out_fmt = SCALER_FMT_ARGB8888;
            break;

         default:
            break;
      }
   }
   else // Use BGR24 as default out format.
   {
      video->pix_fmt = PIX_FMT_BGR24;
      video->scaler.out_fmt = SCALER_FMT_BGR24;
   }

   // Map the emulator's pixel format onto scaler format, libav format and bytes per pixel.
   switch (param->pix_fmt)
   {
      case FFEMU_PIX_RGB565:
         video->scaler.in_fmt = SCALER_FMT_RGB565;
         video->in_pix_fmt = PIX_FMT_RGB565;
         video->pix_size = 2;
         break;

      case FFEMU_PIX_BGR24:
         video->scaler.in_fmt = SCALER_FMT_BGR24;
         video->in_pix_fmt = PIX_FMT_BGR24;
         video->pix_size = 3;
         break;

      case FFEMU_PIX_ARGB8888:
         video->scaler.in_fmt = SCALER_FMT_ARGB8888;
         video->in_pix_fmt = PIX_FMT_RGB32;
         video->pix_size = 4;
         break;

      default:
         return false;
   }

   video->codec = avcodec_alloc_context3(codec);
   if (!video->codec) // FIX: previously unchecked; a NULL context would crash below.
      return false;

   // Useful to set scale_factor to 2 for chroma subsampled formats to maintain full chroma resolution.
   // (Or just use 4:4:4 or RGB ...)
   param->out_width *= params->scale_factor;
   param->out_height *= params->scale_factor;

   video->codec->codec_type = AVMEDIA_TYPE_VIDEO;
   video->codec->width = param->out_width;
   video->codec->height = param->out_height;
   video->codec->time_base = av_d2q((double)params->frame_drop_ratio / param->fps, 1000000); // Arbitrary big number.
   video->codec->sample_aspect_ratio = av_d2q(param->aspect_ratio * param->out_height / param->out_width, 255);
   video->codec->pix_fmt = video->pix_fmt;
   video->codec->thread_count = params->threads;

   if (params->video_qscale)
   {
      video->codec->flags |= CODEC_FLAG_QSCALE;
      video->codec->global_quality = params->video_global_quality;
   }
   else if (params->video_bit_rate)
      video->codec->bit_rate = params->video_bit_rate;

   if (handle->muxer.ctx->oformat->flags & AVFMT_GLOBALHEADER)
      video->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

   // FIX: restored "&params" (was a mangled pilcrow character).
   if (avcodec_open2(video->codec, codec,
            params->video_opts ? &params->video_opts : NULL) != 0)
      return false;

   // Allocate a big buffer :p ffmpeg API doesn't seem to give us some clues how big this buffer should be.
   video->outbuf_size = 1 << 23;
   video->outbuf = (uint8_t*)av_malloc(video->outbuf_size);

   video->frame_drop_ratio = params->frame_drop_ratio;

   // Staging frame for pixel-format conversion before encoding.
   size_t size = avpicture_get_size(video->pix_fmt, param->out_width, param->out_height);
   video->conv_frame_buf = (uint8_t*)av_malloc(size);
   video->conv_frame = av_frame_alloc();
   avpicture_fill((AVPicture*)video->conv_frame, video->conv_frame_buf,
         video->pix_fmt, param->out_width, param->out_height);

   return true;
}
int main(int argc, char* argv[]) { AVFormatContext *ifmt_ctx = NULL; AVFormatContext *ifmt_ctx_a = NULL; AVFormatContext *ofmt_ctx; AVInputFormat* ifmt; AVStream* video_st; AVStream* audio_st; AVCodecContext* pCodecCtx; AVCodecContext* pCodecCtx_a; AVCodec* pCodec; AVCodec* pCodec_a; AVPacket *dec_pkt, enc_pkt; AVPacket *dec_pkt_a, enc_pkt_a; AVFrame *pframe, *pFrameYUV; struct SwsContext *img_convert_ctx; struct SwrContext *aud_convert_ctx; char capture_name[80] = { 0 }; char device_name[80] = { 0 }; char device_name_a[80] = { 0 }; int framecnt = 0; int nb_samples = 0; int videoindex; int audioindex; int i; int ret; HANDLE hThread; const char* out_path = "rtmp://localhost/live/livestream"; int dec_got_frame, enc_got_frame; int dec_got_frame_a, enc_got_frame_a; int aud_next_pts = 0; int vid_next_pts = 0; int encode_video = 1, encode_audio = 1; AVRational time_base_q = { 1, AV_TIME_BASE }; av_register_all(); //Register Device avdevice_register_all(); avformat_network_init(); #if USEFILTER //Register Filter avfilter_register_all(); buffersrc = avfilter_get_by_name("buffer"); buffersink = avfilter_get_by_name("buffersink"); #endif //Show Dshow Device show_dshow_device(); printf("\nChoose video capture device: "); if (gets(capture_name) == 0) { printf("Error in gets()\n"); return -1; } sprintf(device_name, "video=%s", capture_name); printf("\nChoose audio capture device: "); if (gets(capture_name) == 0) { printf("Error in gets()\n"); return -1; } sprintf(device_name_a, "audio=%s", capture_name); //wchar_t *cam = L"video=Integrated Camera"; //wchar_t *cam = L"video=YY伴侣"; //char *device_name_utf8 = dup_wchar_to_utf8(cam); //wchar_t *cam_a = L"audio=麦克风阵列 (Realtek High Definition Audio)"; //char *device_name_utf8_a = dup_wchar_to_utf8(cam_a); ifmt = av_find_input_format("dshow"); // Set device params AVDictionary *device_param = 0; //if not setting rtbufsize, error messages will be shown in cmd, but you can still watch or record the stream correctly in most time 
//setting rtbufsize will erase those error messages, however, larger rtbufsize will bring latency //av_dict_set(&device_param, "rtbufsize", "10M", 0); //Set own video device's name if (avformat_open_input(&ifmt_ctx, device_name, ifmt, &device_param) != 0){ printf("Couldn't open input video stream.(无法打开输入流)\n"); return -1; } //Set own audio device's name if (avformat_open_input(&ifmt_ctx_a, device_name_a, ifmt, &device_param) != 0){ printf("Couldn't open input audio stream.(无法打开输入流)\n"); return -1; } //input video initialize if (avformat_find_stream_info(ifmt_ctx, NULL) < 0) { printf("Couldn't find video stream information.(无法获取流信息)\n"); return -1; } videoindex = -1; for (i = 0; i < ifmt_ctx->nb_streams; i++) if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) { videoindex = i; break; } if (videoindex == -1) { printf("Couldn't find a video stream.(没有找到视频流)\n"); return -1; } if (avcodec_open2(ifmt_ctx->streams[videoindex]->codec, avcodec_find_decoder(ifmt_ctx->streams[videoindex]->codec->codec_id), NULL) < 0) { printf("Could not open video codec.(无法打开解码器)\n"); return -1; } //input audio initialize if (avformat_find_stream_info(ifmt_ctx_a, NULL) < 0) { printf("Couldn't find audio stream information.(无法获取流信息)\n"); return -1; } audioindex = -1; for (i = 0; i < ifmt_ctx_a->nb_streams; i++) if (ifmt_ctx_a->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) { audioindex = i; break; } if (audioindex == -1) { printf("Couldn't find a audio stream.(没有找到视频流)\n"); return -1; } if (avcodec_open2(ifmt_ctx_a->streams[audioindex]->codec, avcodec_find_decoder(ifmt_ctx_a->streams[audioindex]->codec->codec_id), NULL) < 0) { printf("Could not open audio codec.(无法打开解码器)\n"); return -1; } //output initialize avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", out_path); //output video encoder initialize pCodec = avcodec_find_encoder(AV_CODEC_ID_H264); if (!pCodec){ printf("Can not find output video encoder! 
(没有找到合适的编码器!)\n"); return -1; } pCodecCtx = avcodec_alloc_context3(pCodec); pCodecCtx->pix_fmt = PIX_FMT_YUV420P; pCodecCtx->width = ifmt_ctx->streams[videoindex]->codec->width; pCodecCtx->height = ifmt_ctx->streams[videoindex]->codec->height; pCodecCtx->time_base.num = 1; pCodecCtx->time_base.den = 25; pCodecCtx->bit_rate = 300000; pCodecCtx->gop_size = 250; /* Some formats want stream headers to be separate. */ if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) pCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER; //H264 codec param //pCodecCtx->me_range = 16; //pCodecCtx->max_qdiff = 4; //pCodecCtx->qcompress = 0.6; pCodecCtx->qmin = 10; pCodecCtx->qmax = 51; //Optional Param pCodecCtx->max_b_frames = 0; // Set H264 preset and tune AVDictionary *param = 0; av_dict_set(¶m, "preset", "fast", 0); av_dict_set(¶m, "tune", "zerolatency", 0); if (avcodec_open2(pCodecCtx, pCodec, ¶m) < 0){ printf("Failed to open output video encoder! (编码器打开失败!)\n"); return -1; } //Add a new stream to output,should be called by the user before avformat_write_header() for muxing video_st = avformat_new_stream(ofmt_ctx, pCodec); if (video_st == NULL){ return -1; } video_st->time_base.num = 1; video_st->time_base.den = 25; video_st->codec = pCodecCtx; //output audio encoder initialize pCodec_a = avcodec_find_encoder(AV_CODEC_ID_AAC); if (!pCodec_a){ printf("Can not find output audio encoder! (没有找到合适的编码器!)\n"); return -1; } pCodecCtx_a = avcodec_alloc_context3(pCodec_a); pCodecCtx_a->channels = 2; pCodecCtx_a->channel_layout = av_get_default_channel_layout(2); pCodecCtx_a->sample_rate = ifmt_ctx_a->streams[audioindex]->codec->sample_rate; pCodecCtx_a->sample_fmt = pCodec_a->sample_fmts[0]; pCodecCtx_a->bit_rate = 32000; pCodecCtx_a->time_base.num = 1; pCodecCtx_a->time_base.den = pCodecCtx_a->sample_rate; /** Allow the use of the experimental AAC encoder */ pCodecCtx_a->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL; /* Some formats want stream headers to be separate. 
*/ if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) pCodecCtx_a->flags |= CODEC_FLAG_GLOBAL_HEADER; if (avcodec_open2(pCodecCtx_a, pCodec_a, NULL) < 0){ printf("Failed to open ouput audio encoder! (编码器打开失败!)\n"); return -1; } //Add a new stream to output,should be called by the user before avformat_write_header() for muxing audio_st = avformat_new_stream(ofmt_ctx, pCodec_a); if (audio_st == NULL){ return -1; } audio_st->time_base.num = 1; audio_st->time_base.den = pCodecCtx_a->sample_rate; audio_st->codec = pCodecCtx_a; //Open output URL,set before avformat_write_header() for muxing if (avio_open(&ofmt_ctx->pb, out_path, AVIO_FLAG_READ_WRITE) < 0){ printf("Failed to open output file! (输出文件打开失败!)\n"); return -1; } //Show some Information av_dump_format(ofmt_ctx, 0, out_path, 1); //Write File Header avformat_write_header(ofmt_ctx, NULL); //prepare before decode and encode dec_pkt = (AVPacket *)av_malloc(sizeof(AVPacket)); #if USEFILTER #else //camera data may has a pix fmt of RGB or sth else,convert it to YUV420 img_convert_ctx = sws_getContext(ifmt_ctx->streams[videoindex]->codec->width, ifmt_ctx->streams[videoindex]->codec->height, ifmt_ctx->streams[videoindex]->codec->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); // Initialize the resampler to be able to convert audio sample formats aud_convert_ctx = swr_alloc_set_opts(NULL, av_get_default_channel_layout(pCodecCtx_a->channels), pCodecCtx_a->sample_fmt, pCodecCtx_a->sample_rate, av_get_default_channel_layout(ifmt_ctx_a->streams[audioindex]->codec->channels), ifmt_ctx_a->streams[audioindex]->codec->sample_fmt, ifmt_ctx_a->streams[audioindex]->codec->sample_rate, 0, NULL); /** * Perform a sanity check so that the number of converted samples is * not greater than the number of samples to be converted. 
* If the sample rates differ, this case has to be handled differently */ //av_assert0(pCodecCtx_a->sample_rate == ifmt_ctx_a->streams[audioindex]->codec->sample_rate); swr_init(aud_convert_ctx); #endif //Initialize the buffer to store YUV frames to be encoded. pFrameYUV = av_frame_alloc(); uint8_t *out_buffer = (uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height)); avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height); //Initialize the FIFO buffer to store audio samples to be encoded. AVAudioFifo *fifo = NULL; fifo = av_audio_fifo_alloc(pCodecCtx_a->sample_fmt, pCodecCtx_a->channels, 1); //Initialize the buffer to store converted samples to be encoded. uint8_t **converted_input_samples = NULL; /** * Allocate as many pointers as there are audio channels. * Each pointer will later point to the audio samples of the corresponding * channels (although it may be NULL for interleaved formats). */ if (!(converted_input_samples = (uint8_t**)calloc(pCodecCtx_a->channels, sizeof(**converted_input_samples)))) { printf("Could not allocate converted input sample pointers\n"); return AVERROR(ENOMEM); } printf("\n --------call started----------\n"); #if USEFILTER printf("\n Press differnet number for different filters:"); printf("\n 1->Mirror"); printf("\n 2->Add Watermark"); printf("\n 3->Negate"); printf("\n 4->Draw Edge"); printf("\n 5->Split Into 4"); printf("\n 6->Vintage"); printf("\n Press 0 to remove filter\n"); #endif printf("\nPress enter to stop...\n"); hThread = CreateThread( NULL, // default security attributes 0, // use default stack size MyThreadFunction, // thread function name NULL, // argument to thread function 0, // use default creation flags NULL); // returns the thread identifier //start decode and encode int64_t start_time = av_gettime(); while (encode_video || encode_audio) { if (encode_video && (!encode_audio || av_compare_ts(vid_next_pts, time_base_q, aud_next_pts, 
time_base_q) <= 0)) { if ((ret=av_read_frame(ifmt_ctx, dec_pkt)) >= 0){ if (exit_thread) break; av_log(NULL, AV_LOG_DEBUG, "Going to reencode the frame\n"); pframe = av_frame_alloc(); if (!pframe) { ret = AVERROR(ENOMEM); return ret; } ret = avcodec_decode_video2(ifmt_ctx->streams[dec_pkt->stream_index]->codec, pframe, &dec_got_frame, dec_pkt); if (ret < 0) { av_frame_free(&pframe); av_log(NULL, AV_LOG_ERROR, "Decoding failed\n"); break; } if (dec_got_frame){ #if USEFILTER pframe->pts = av_frame_get_best_effort_timestamp(pframe); if (filter_change) apply_filters(ifmt_ctx); filter_change = 0; /* push the decoded frame into the filtergraph */ if (av_buffersrc_add_frame(buffersrc_ctx, pframe) < 0) { printf("Error while feeding the filtergraph\n"); break; } picref = av_frame_alloc(); /* pull filtered pictures from the filtergraph */ while (1) { ret = av_buffersink_get_frame_flags(buffersink_ctx, picref, 0); if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break; if (ret < 0) return ret; if (picref) { img_convert_ctx = sws_getContext(picref->width, picref->height, (AVPixelFormat)picref->format, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); sws_scale(img_convert_ctx, (const uint8_t* const*)picref->data, picref->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize); sws_freeContext(img_convert_ctx); pFrameYUV->width = picref->width; pFrameYUV->height = picref->height; pFrameYUV->format = PIX_FMT_YUV420P; #else sws_scale(img_convert_ctx, (const uint8_t* const*)pframe->data, pframe->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize); pFrameYUV->width = pframe->width; pFrameYUV->height = pframe->height; pFrameYUV->format = PIX_FMT_YUV420P; #endif enc_pkt.data = NULL; enc_pkt.size = 0; av_init_packet(&enc_pkt); ret = avcodec_encode_video2(pCodecCtx, &enc_pkt, pFrameYUV, &enc_got_frame); av_frame_free(&pframe); if (enc_got_frame == 1){ //printf("Succeed to encode frame: %5d\tsize:%5d\n", framecnt, 
enc_pkt.size); framecnt++; enc_pkt.stream_index = video_st->index; //Write PTS AVRational time_base = ofmt_ctx->streams[0]->time_base;//{ 1, 1000 }; AVRational r_framerate1 = ifmt_ctx->streams[videoindex]->r_frame_rate;//{ 50, 2 }; //Duration between 2 frames (us) int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1)); //内部时间戳 //Parameters //enc_pkt.pts = (double)(framecnt*calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base)); enc_pkt.pts = av_rescale_q(framecnt*calc_duration, time_base_q, time_base); enc_pkt.dts = enc_pkt.pts; enc_pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base); //(double)(calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base)); enc_pkt.pos = -1; //printf("video pts : %d\n", enc_pkt.pts); vid_next_pts=framecnt*calc_duration; //general timebase //Delay int64_t pts_time = av_rescale_q(enc_pkt.pts, time_base, time_base_q); int64_t now_time = av_gettime() - start_time; if ((pts_time > now_time) && ((vid_next_pts + pts_time - now_time)<aud_next_pts)) av_usleep(pts_time - now_time); ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt); av_free_packet(&enc_pkt); } #if USEFILTER av_frame_unref(picref); } } #endif } else { av_frame_free(&pframe); } av_free_packet(dec_pkt); } else if (ret == AVERROR_EOF) encode_video = 0; else { printf("Could not read video frame\n"); return ret; } } else { //audio trancoding here const int output_frame_size = pCodecCtx_a->frame_size; if (exit_thread) break; /** * Make sure that there is one frame worth of samples in the FIFO * buffer so that the encoder can do its work. * Since the decoder's and the encoder's frame size may differ, we * need to FIFO buffer to store as many frames worth of input samples * that they make up at least one frame worth of output samples. */ while (av_audio_fifo_size(fifo) < output_frame_size) { /** * Decode one frame worth of audio samples, convert it to the * output sample format and put it into the FIFO buffer. 
*/ AVFrame *input_frame = av_frame_alloc(); if (!input_frame) { ret = AVERROR(ENOMEM); return ret; } /** Decode one frame worth of audio samples. */ /** Packet used for temporary storage. */ AVPacket input_packet; av_init_packet(&input_packet); input_packet.data = NULL; input_packet.size = 0; /** Read one audio frame from the input file into a temporary packet. */ if ((ret = av_read_frame(ifmt_ctx_a, &input_packet)) < 0) { /** If we are at the end of the file, flush the decoder below. */ if (ret == AVERROR_EOF) { encode_audio = 0; } else { printf("Could not read audio frame\n"); return ret; } } /** * Decode the audio frame stored in the temporary packet. * The input audio stream decoder is used to do this. * If we are at the end of the file, pass an empty packet to the decoder * to flush it. */ if ((ret = avcodec_decode_audio4(ifmt_ctx_a->streams[audioindex]->codec, input_frame, &dec_got_frame_a, &input_packet)) < 0) { printf("Could not decode audio frame\n"); return ret; } av_packet_unref(&input_packet); /** If there is decoded data, convert and store it */ if (dec_got_frame_a) { /** * Allocate memory for the samples of all channels in one consecutive * block for convenience. */ if ((ret = av_samples_alloc(converted_input_samples, NULL, pCodecCtx_a->channels, input_frame->nb_samples, pCodecCtx_a->sample_fmt, 0)) < 0) { printf("Could not allocate converted input samples\n"); av_freep(&(*converted_input_samples)[0]); free(*converted_input_samples); return ret; } /** * Convert the input samples to the desired output sample format. * This requires a temporary storage provided by converted_input_samples. */ /** Convert the samples using the resampler. */ if ((ret = swr_convert(aud_convert_ctx, converted_input_samples, input_frame->nb_samples, (const uint8_t**)input_frame->extended_data, input_frame->nb_samples)) < 0) { printf("Could not convert input samples\n"); return ret; } /** Add the converted input samples to the FIFO buffer for later processing. 
*/ /** * Make the FIFO as large as it needs to be to hold both, * the old and the new samples. */ if ((ret = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + input_frame->nb_samples)) < 0) { printf("Could not reallocate FIFO\n"); return ret; } /** Store the new samples in the FIFO buffer. */ if (av_audio_fifo_write(fifo, (void **)converted_input_samples, input_frame->nb_samples) < input_frame->nb_samples) { printf("Could not write data to FIFO\n"); return AVERROR_EXIT; } } } /** * If we have enough samples for the encoder, we encode them. * At the end of the file, we pass the remaining samples to * the encoder. */ if (av_audio_fifo_size(fifo) >= output_frame_size) /** * Take one frame worth of audio samples from the FIFO buffer, * encode it and write it to the output file. */ { /** Temporary storage of the output samples of the frame written to the file. */ AVFrame *output_frame=av_frame_alloc(); if (!output_frame) { ret = AVERROR(ENOMEM); return ret; } /** * Use the maximum number of possible samples per frame. * If there is less than the maximum possible frame size in the FIFO * buffer use this number. Otherwise, use the maximum possible frame size */ const int frame_size = FFMIN(av_audio_fifo_size(fifo), pCodecCtx_a->frame_size); /** Initialize temporary storage for one output frame. */ /** * Set the frame's parameters, especially its size and format. * av_frame_get_buffer needs this to allocate memory for the * audio samples of the frame. * Default channel layouts based on the number of channels * are assumed for simplicity. */ output_frame->nb_samples = frame_size; output_frame->channel_layout = pCodecCtx_a->channel_layout; output_frame->format = pCodecCtx_a->sample_fmt; output_frame->sample_rate = pCodecCtx_a->sample_rate; /** * Allocate the samples of the created frame. This call will make * sure that the audio frame can hold as many samples as specified. 
*/ if ((ret = av_frame_get_buffer(output_frame, 0)) < 0) { printf("Could not allocate output frame samples\n"); av_frame_free(&output_frame); return ret; } /** * Read as many samples from the FIFO buffer as required to fill the frame. * The samples are stored in the frame temporarily. */ if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size) { printf("Could not read data from FIFO\n"); return AVERROR_EXIT; } /** Encode one frame worth of audio samples. */ /** Packet used for temporary storage. */ AVPacket output_packet; av_init_packet(&output_packet); output_packet.data = NULL; output_packet.size = 0; /** Set a timestamp based on the sample rate for the container. */ if (output_frame) { nb_samples += output_frame->nb_samples; } /** * Encode the audio frame and store it in the temporary packet. * The output audio stream encoder is used to do this. */ if ((ret = avcodec_encode_audio2(pCodecCtx_a, &output_packet, output_frame, &enc_got_frame_a)) < 0) { printf("Could not encode frame\n"); av_packet_unref(&output_packet); return ret; } /** Write one audio frame from the temporary packet to the output file. 
*/ if (enc_got_frame_a) { output_packet.stream_index = 1; AVRational time_base = ofmt_ctx->streams[1]->time_base; AVRational r_framerate1 = { ifmt_ctx_a->streams[audioindex]->codec->sample_rate, 1 };// { 44100, 1}; int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1)); //内部时间戳 output_packet.pts = av_rescale_q(nb_samples*calc_duration, time_base_q, time_base); output_packet.dts = output_packet.pts; output_packet.duration = output_frame->nb_samples; //printf("audio pts : %d\n", output_packet.pts); aud_next_pts = nb_samples*calc_duration; int64_t pts_time = av_rescale_q(output_packet.pts, time_base, time_base_q); int64_t now_time = av_gettime() - start_time; if ((pts_time > now_time) && ((aud_next_pts + pts_time - now_time)<vid_next_pts)) av_usleep(pts_time - now_time); if ((ret = av_interleaved_write_frame(ofmt_ctx, &output_packet)) < 0) { printf("Could not write frame\n"); av_packet_unref(&output_packet); return ret; } av_packet_unref(&output_packet); } av_frame_free(&output_frame); } } } //Flush Encoder ret = flush_encoder(ifmt_ctx, ofmt_ctx, 0, framecnt); if (ret < 0) { printf("Flushing encoder failed\n"); return -1; } ret = flush_encoder_a(ifmt_ctx_a, ofmt_ctx, 1, nb_samples); if (ret < 0) { printf("Flushing encoder failed\n"); return -1; } //Write file trailer av_write_trailer(ofmt_ctx); cleanup: //Clean #if USEFILTER if (filter_graph) avfilter_graph_free(&filter_graph); #endif if (video_st) avcodec_close(video_st->codec); if (audio_st) avcodec_close(audio_st->codec); av_free(out_buffer); if (converted_input_samples) { av_freep(&converted_input_samples[0]); //free(converted_input_samples); } if (fifo) av_audio_fifo_free(fifo); avio_close(ofmt_ctx->pb); avformat_free_context(ifmt_ctx); avformat_free_context(ifmt_ctx_a); avformat_free_context(ofmt_ctx); CloseHandle(hThread); return 0; }
bool FFmpegEncoderOpen(struct FFmpegEncoder* encoder, const char* outfile) { AVCodec* acodec = avcodec_find_encoder_by_name(encoder->audioCodec); AVCodec* vcodec = avcodec_find_encoder_by_name(encoder->videoCodec); if ((encoder->audioCodec && !acodec) || !vcodec || !FFmpegEncoderVerifyContainer(encoder)) { return false; } encoder->currentAudioSample = 0; encoder->currentAudioFrame = 0; encoder->currentVideoFrame = 0; encoder->nextAudioPts = 0; AVOutputFormat* oformat = av_guess_format(encoder->containerFormat, 0, 0); #ifndef USE_LIBAV avformat_alloc_output_context2(&encoder->context, oformat, 0, outfile); #else encoder->context = avformat_alloc_context(); strncpy(encoder->context->filename, outfile, sizeof(encoder->context->filename) - 1); encoder->context->filename[sizeof(encoder->context->filename) - 1] = '\0'; encoder->context->oformat = oformat; #endif if (acodec) { #ifdef FFMPEG_USE_CODECPAR encoder->audioStream = avformat_new_stream(encoder->context, NULL); encoder->audio = avcodec_alloc_context3(acodec); #else encoder->audioStream = avformat_new_stream(encoder->context, acodec); encoder->audio = encoder->audioStream->codec; #endif encoder->audio->bit_rate = encoder->audioBitrate; encoder->audio->channels = 2; encoder->audio->channel_layout = AV_CH_LAYOUT_STEREO; encoder->audio->sample_rate = encoder->sampleRate; encoder->audio->sample_fmt = encoder->sampleFormat; AVDictionary* opts = 0; av_dict_set(&opts, "strict", "-2", 0); if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) { encoder->audio->flags |= CODEC_FLAG_GLOBAL_HEADER; } avcodec_open2(encoder->audio, acodec, &opts); av_dict_free(&opts); #if LIBAVCODEC_VERSION_MAJOR >= 55 encoder->audioFrame = av_frame_alloc(); #else encoder->audioFrame = avcodec_alloc_frame(); #endif if (!encoder->audio->frame_size) { encoder->audio->frame_size = 1; } encoder->audioFrame->nb_samples = encoder->audio->frame_size; encoder->audioFrame->format = encoder->audio->sample_fmt; encoder->audioFrame->pts = 0; 
encoder->resampleContext = avresample_alloc_context(); av_opt_set_int(encoder->resampleContext, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0); av_opt_set_int(encoder->resampleContext, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0); av_opt_set_int(encoder->resampleContext, "in_sample_rate", PREFERRED_SAMPLE_RATE, 0); av_opt_set_int(encoder->resampleContext, "out_sample_rate", encoder->sampleRate, 0); av_opt_set_int(encoder->resampleContext, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0); av_opt_set_int(encoder->resampleContext, "out_sample_fmt", encoder->sampleFormat, 0); avresample_open(encoder->resampleContext); encoder->audioBufferSize = (encoder->audioFrame->nb_samples * PREFERRED_SAMPLE_RATE / encoder->sampleRate) * 4; encoder->audioBuffer = av_malloc(encoder->audioBufferSize); encoder->postaudioBufferSize = av_samples_get_buffer_size(0, encoder->audio->channels, encoder->audio->frame_size, encoder->audio->sample_fmt, 0); encoder->postaudioBuffer = av_malloc(encoder->postaudioBufferSize); avcodec_fill_audio_frame(encoder->audioFrame, encoder->audio->channels, encoder->audio->sample_fmt, (const uint8_t*) encoder->postaudioBuffer, encoder->postaudioBufferSize, 0); if (encoder->audio->codec->id == AV_CODEC_ID_AAC && (strcasecmp(encoder->containerFormat, "mp4") || strcasecmp(encoder->containerFormat, "m4v") || strcasecmp(encoder->containerFormat, "mov"))) { // MP4 container doesn't support the raw ADTS AAC format that the encoder spits out #ifdef FFMPEG_USE_NEW_BSF av_bsf_alloc(av_bsf_get_by_name("aac_adtstoasc"), &encoder->absf); avcodec_parameters_from_context(encoder->absf->par_in, encoder->audio); av_bsf_init(encoder->absf); #else encoder->absf = av_bitstream_filter_init("aac_adtstoasc"); #endif } #ifdef FFMPEG_USE_CODECPAR avcodec_parameters_from_context(encoder->audioStream->codecpar, encoder->audio); #endif } #ifdef FFMPEG_USE_CODECPAR encoder->videoStream = avformat_new_stream(encoder->context, NULL); encoder->video = avcodec_alloc_context3(vcodec); #else 
encoder->videoStream = avformat_new_stream(encoder->context, vcodec); encoder->video = encoder->videoStream->codec; #endif encoder->video->bit_rate = encoder->videoBitrate; encoder->video->width = encoder->width; encoder->video->height = encoder->height; encoder->video->time_base = (AVRational) { VIDEO_TOTAL_LENGTH, GBA_ARM7TDMI_FREQUENCY }; encoder->video->pix_fmt = encoder->pixFormat; encoder->video->gop_size = 60; encoder->video->max_b_frames = 3; if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) { encoder->video->flags |= CODEC_FLAG_GLOBAL_HEADER; } if (strcmp(vcodec->name, "libx264") == 0) { // Try to adaptively figure out when you can use a slower encoder if (encoder->width * encoder->height > 1000000) { av_opt_set(encoder->video->priv_data, "preset", "superfast", 0); } else if (encoder->width * encoder->height > 500000) { av_opt_set(encoder->video->priv_data, "preset", "veryfast", 0); } else { av_opt_set(encoder->video->priv_data, "preset", "faster", 0); } av_opt_set(encoder->video->priv_data, "tune", "zerolatency", 0); } avcodec_open2(encoder->video, vcodec, 0); #if LIBAVCODEC_VERSION_MAJOR >= 55 encoder->videoFrame = av_frame_alloc(); #else encoder->videoFrame = avcodec_alloc_frame(); #endif encoder->videoFrame->format = encoder->video->pix_fmt; encoder->videoFrame->width = encoder->video->width; encoder->videoFrame->height = encoder->video->height; encoder->videoFrame->pts = 0; _ffmpegSetVideoDimensions(&encoder->d, encoder->iwidth, encoder->iheight); av_image_alloc(encoder->videoFrame->data, encoder->videoFrame->linesize, encoder->video->width, encoder->video->height, encoder->video->pix_fmt, 32); #ifdef FFMPEG_USE_CODECPAR avcodec_parameters_from_context(encoder->videoStream->codecpar, encoder->video); #endif avio_open(&encoder->context->pb, outfile, AVIO_FLAG_WRITE); return avformat_write_header(encoder->context, 0) >= 0; }
DWORD WINAPI MyThreadFunction(LPVOID lpParam) { #if USEFILTER int ch = getchar(); while (ch != '\n') { switch (ch){ case FILTER_NULL: { printf("\nnow using null filter\nPress other numbers for other filters:"); filter_change = 1; filter_descr = "null"; getchar(); ch = getchar(); break; } case FILTER_MIRROR: { printf("\nnow using mirror filter\nPress other numbers for other filters:"); filter_change = 1; filter_descr = filter_mirror; getchar(); ch = getchar(); break; } case FILTER_WATERMATK: { printf("\nnow using watermark filter\nPress other numbers for other filters:"); filter_change = 1; filter_descr = filter_watermark; getchar(); ch = getchar(); break; } case FILTER_NEGATE: { printf("\nnow using negate filter\nPress other numbers for other filters:"); filter_change = 1; filter_descr = filter_negate; getchar(); ch = getchar(); break; } case FILTER_EDGE: { printf("\nnow using edge filter\nPress other numbers for other filters:"); filter_change = 1; filter_descr = filter_edge; getchar(); ch = getchar(); break; } case FILTER_SPLIT4: { printf("\nnow using split4 filter\nPress other numbers for other filters:"); filter_change = 1; filter_descr = filter_split4; getchar(); ch = getchar(); break; } case FILTER_VINTAGE: { printf("\nnow using vintage filter\nPress other numbers for other filters:"); filter_change = 1; filter_descr = filter_vintage; getchar(); ch = getchar(); break; } default: { getchar(); ch = getchar(); break; } } #else while ((getchar()) != '\n') { ; #endif } exit_thread = 1; return 0; } //Show Device void show_dshow_device(){ AVFormatContext *pFmtCtx = avformat_alloc_context(); AVDictionary* options = NULL; av_dict_set(&options, "list_devices", "true", 0); AVInputFormat *iformat = av_find_input_format("dshow"); printf("Device Info=============\n"); avformat_open_input(&pFmtCtx, "video=dummy", iformat, &options); printf("========================\n"); }
// init driver
// Allocate and configure a lavc decoder context for the stream described by
// `sh` (codec chosen via sh->codec->dll), including thread-count detection,
// direct-rendering/slice setup, error-resilience flags, per-fourcc extradata
// handling, and finally avcodec_open2. Returns 1 on success, 0 on failure
// (after uninit(sh) where state was already attached).
static int init(sh_video_t *sh){
    AVCodecContext *avctx;
    vd_ffmpeg_ctx *ctx;
    AVCodec *lavc_codec;
    int lowres_w=0;
    int do_vis_debug= lavc_param_vismv || (lavc_param_debug&(FF_DEBUG_VIS_MB_TYPE|FF_DEBUG_VIS_QP));
    // slice is rather broken with threads, so disable that combination unless
    // explicitly requested
    int use_slices = vd_use_slices > 0 || (vd_use_slices < 0 && lavc_param_threads <= 1);
    AVDictionary *opts = NULL;

    init_avcodec();

    // Private per-stream state; zeroed so all do_* flags start disabled.
    ctx = sh->context = malloc(sizeof(vd_ffmpeg_ctx));
    if (!ctx)
        return 0;
    memset(ctx, 0, sizeof(vd_ffmpeg_ctx));

    lavc_codec = avcodec_find_decoder_by_name(sh->codec->dll);
    if(!lavc_codec){
        mp_msg(MSGT_DECVIDEO, MSGL_ERR, MSGTR_MissingLAVCcodec, sh->codec->dll);
        uninit(sh);
        return 0;
    }

    // Auto-detect a sensible decode thread count per platform.
    if (auto_threads) {
#if (defined(__MINGW32__) && HAVE_PTHREADS)
        lavc_param_threads = pthread_num_processors_np();
#elif defined(__linux__)
        /* Code stolen from x264 :) */
        // Count set bits in the process affinity mask.
        unsigned int bit;
        int np;
        cpu_set_t p_aff;
        memset( &p_aff, 0, sizeof(p_aff) );
        sched_getaffinity( 0, sizeof(p_aff), &p_aff );
        for( np = 0, bit = 0; bit < sizeof(p_aff); bit++ )
            np += (((uint8_t *)&p_aff)[bit / 8] >> (bit % 8)) & 1;
        lavc_param_threads = np;
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__) || defined(__APPLE__) || defined (__NetBSD__) || defined(__OpenBSD__)
        /* Code stolen from x264 :) */
        int numberOfCPUs;
        size_t length = sizeof( numberOfCPUs );
#if defined (__NetBSD__) || defined(__OpenBSD__)
        int mib[2] = { CTL_HW, HW_NCPU };
        if( sysctl(mib, 2, &numberOfCPUs, &length, NULL, 0) )
#else
        if( sysctlbyname("hw.ncpu", &numberOfCPUs, &length, NULL, 0) )
#endif
        {
            numberOfCPUs = 1;
        }
        lavc_param_threads = numberOfCPUs;
#endif
        // Only a whitelist of codecs gets multiple threads; cap at 16.
        if(lavc_codec->id == CODEC_ID_H264 || lavc_codec->id == CODEC_ID_FFH264 ||
           lavc_codec->id == CODEC_ID_MPEG2TS || lavc_codec->id == CODEC_ID_MPEG2VIDEO_XVMC) {
            if (lavc_param_threads > 16) lavc_param_threads = 16;
            // MPEG-2 threading conflicts with slice rendering; force slices off.
            if (lavc_param_threads > 1 && (lavc_codec->id == CODEC_ID_MPEG2VIDEO ||
                lavc_codec->id == CODEC_ID_MPEG2TS || lavc_codec->id == CODEC_ID_MPEG2VIDEO_XVMC))
                vd_use_slices = 0;
            mp_msg(MSGT_DECVIDEO, MSGL_INFO, "Spawning %d decoding thread%s...\n", lavc_param_threads, lavc_param_threads == 1 ? "" : "s");
        } else {
            lavc_param_threads = 1;
        }
    }

    if(use_slices && (lavc_codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND) && !do_vis_debug)
        ctx->do_slices=1;

    // Direct rendering 1: decoder writes straight into VO buffers. Disabled
    // for codecs known to break with it and for visual-debug modes.
    if(lavc_codec->capabilities&CODEC_CAP_DR1 && !do_vis_debug && lavc_codec->id != CODEC_ID_INTERPLAY_VIDEO && lavc_codec->id != CODEC_ID_VP8)
        ctx->do_dr1=1;
    ctx->nonref_dr = lavc_codec->id == CODEC_ID_H264;
    ctx->ip_count= ctx->b_count= 0;

    ctx->pic = avcodec_alloc_frame();
    ctx->avctx = avcodec_alloc_context3(lavc_codec);
    avctx = ctx->avctx;
    avctx->opaque = sh;
    avctx->codec_id = lavc_codec->id;

    avctx->get_format = get_format;
    if(ctx->do_dr1){
        avctx->flags|= CODEC_FLAG_EMU_EDGE;
        avctx-> get_buffer= get_buffer;
        avctx->release_buffer= release_buffer;
        avctx-> reget_buffer= get_buffer;
    }

    avctx->flags|= lavc_param_bitexact;

    avctx->coded_width = sh->disp_w;
    avctx->coded_height= sh->disp_h;
    avctx->workaround_bugs= lavc_param_workaround_bugs;
    // Map the user's 0-5 error-resilience level onto cumulative AV_EF_* bits.
    switch (lavc_param_error_resilience) {
    case 5:
        avctx->err_recognition |= AV_EF_EXPLODE | AV_EF_COMPLIANT | AV_EF_CAREFUL;
        break;
    case 4:
    case 3:
        avctx->err_recognition |= AV_EF_AGGRESSIVE;
        // Fallthrough
    case 2:
        avctx->err_recognition |= AV_EF_COMPLIANT;
        // Fallthrough
    case 1:
        avctx->err_recognition |= AV_EF_CAREFUL;
    }
    // NOTE(review): this ORs CODEC_FLAG_GRAY into the option *variable*, not
    // into avctx->flags — looks like it should be guarded and applied to
    // avctx->flags instead; confirm against upstream vd_ffmpeg.c.
    lavc_param_gray|= CODEC_FLAG_GRAY;
#ifdef CODEC_FLAG2_SHOW_ALL
    if(!lavc_param_wait_keyframe) avctx->flags2 |= CODEC_FLAG2_SHOW_ALL;
#endif
    avctx->flags2|= lavc_param_fast;
    avctx->codec_tag= sh->format;
    avctx->stream_codec_tag= sh->video.fccHandler;
    avctx->idct_algo= lavc_param_idct_algo;
    avctx->error_concealment= lavc_param_error_concealment;
    avctx->debug= lavc_param_debug;
    if (lavc_param_debug)
        av_log_set_level(AV_LOG_DEBUG);
    avctx->debug_mv= lavc_param_vismv;
    avctx->skip_top   = lavc_param_skip_top;
    avctx->skip_bottom= lavc_param_skip_bottom;
    // Optional "-lavdopts lowres=N,W" : decode at reduced resolution, but only
    // when the source is at least W pixels wide.
    if(lavc_param_lowres_str != NULL)
    {
        sscanf(lavc_param_lowres_str, "%d,%d", &lavc_param_lowres, &lowres_w);
        if(lavc_param_lowres < 1 || lavc_param_lowres > 16 || (lowres_w > 0 && avctx->width < lowres_w))
            lavc_param_lowres = 0;
        avctx->lowres = lavc_param_lowres;
    }
    avctx->skip_loop_filter = str2AVDiscard(lavc_param_skip_loop_filter_str);
    avctx->skip_idct = str2AVDiscard(lavc_param_skip_idct_str);
    avctx->skip_frame = str2AVDiscard(lavc_param_skip_frame_str);

    // Free-form user options: try the generic context first, then the
    // codec-private options class.
    if(lavc_avopt){
        if (parse_avopts(avctx, lavc_avopt) < 0 &&
            (!lavc_codec->priv_class || parse_avopts(avctx->priv_data, lavc_avopt) < 0)) {
            mp_msg(MSGT_DECVIDEO, MSGL_ERR, "Your options /%s/ look like gibberish to me pal\n", lavc_avopt);
            uninit(sh);
            return 0;
        }
    }

    skip_idct = avctx->skip_idct;
    skip_frame = avctx->skip_frame;

    mp_dbg(MSGT_DECVIDEO, MSGL_DBG2, "libavcodec.size: %d x %d\n", avctx->width, avctx->height);
    // Per-fourcc extradata plumbing: each container/codec pair stores its
    // out-of-band decoder config in a different place.
    switch (sh->format) {
    case mmioFOURCC('S','V','Q','3'):
        /* SVQ3 extradata can show up as sh->ImageDesc if demux_mov is used, or
           in the phony AVI header if demux_lavf is used. The first case is
           handled here; the second case falls through to the next section. */
        if (sh->ImageDesc) {
            // NOTE(review): av_mallocz result is used unchecked here and in the
            // cases below — a failed allocation would crash in memcpy.
            avctx->extradata_size = (*(int *)sh->ImageDesc) - sizeof(int);
            avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
            memcpy(avctx->extradata, ((int *)sh->ImageDesc)+1, avctx->extradata_size);
            break;
        }
        /* fallthrough */

    case mmioFOURCC('A','V','R','n'):
    case mmioFOURCC('M','J','P','G'):
        /* AVRn stores huffman table in AVI header */
        /* Pegasus MJPEG stores it also in AVI header, but it uses the common
           MJPG fourcc :( */
        if (!sh->bih || sh->bih->biSize <= sizeof(*sh->bih))
            break;
        av_dict_set(&opts, "extern_huff", "1", 0);
        avctx->extradata_size = sh->bih->biSize-sizeof(*sh->bih);
        avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        memcpy(avctx->extradata, sh->bih+1, avctx->extradata_size);
#if 0
        {
            int x;
            uint8_t *p = avctx->extradata;
            for (x=0; x<avctx->extradata_size; x++)
                mp_msg(MSGT_DECVIDEO, MSGL_INFO, "[%x] ", p[x]);
            mp_msg(MSGT_DECVIDEO, MSGL_INFO, "\n");
        }
#endif
        break;

    case mmioFOURCC('R', 'V', '1', '0'):
    case mmioFOURCC('R', 'V', '1', '3'):
    case mmioFOURCC('R', 'V', '2', '0'):
    case mmioFOURCC('R', 'V', '3', '0'):
    case mmioFOURCC('R', 'V', '4', '0'):
        if(sh->bih->biSize<sizeof(*sh->bih)+8){
            /* only 1 packet per frame & sub_id from fourcc */
            // Synthesize the 8-byte RealVideo extradata the decoder expects.
            avctx->extradata_size= 8;
            avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
            ((uint32_t *)avctx->extradata)[0] = 0;
            ((uint32_t *)avctx->extradata)[1] =
                (sh->format == mmioFOURCC('R', 'V', '1', '3')) ? 0x10003001 : 0x10000000;
        } else {
            /* has extra slice header (demux_rm or rm->avi streamcopy) */
            avctx->extradata_size = sh->bih->biSize-sizeof(*sh->bih);
            avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
            memcpy(avctx->extradata, sh->bih+1, avctx->extradata_size);
        }
        avctx->sub_id= AV_RB32(avctx->extradata+4);

//        printf("%X %X %d %d\n", extrahdr[0], extrahdr[1]);
        break;

    default:
        // Generic case: extradata follows the BITMAPINFOHEADER, if any.
        if (!sh->bih || sh->bih->biSize <= sizeof(*sh->bih))
            break;
        avctx->extradata_size = sh->bih->biSize-sizeof(*sh->bih);
        avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        memcpy(avctx->extradata, sh->bih+1, avctx->extradata_size);
        break;
    }

    if(sh->bih)
        avctx->bits_per_coded_sample= sh->bih->biBitCount;

    avctx->thread_count = lavc_param_threads;
    avctx->thread_type = FF_THREAD_FRAME | FF_THREAD_SLICE;

    if(lavc_codec->capabilities & CODEC_CAP_HWACCEL)
        // HACK around badly placed checks in mpeg_mc_decode_init
        set_format_params(avctx, PIX_FMT_XVMC_MPEG2_IDCT);

    /* open it */
    // NOTE(review): on this failure path `opts` is not av_dict_free'd — a
    // small leak when "extern_huff" was set above; confirm before changing.
    if (avcodec_open2(avctx, lavc_codec, &opts) < 0) {
        mp_msg(MSGT_DECVIDEO, MSGL_ERR, MSGTR_CantOpenCodec);
        uninit(sh);
        return 0;
    }
    av_dict_free(&opts);
    // this is necessary in case get_format was never called and init_vo is
    // too late e.g. for H.264 VDPAU
    set_format_params(avctx, avctx->pix_fmt);
    mp_msg(MSGT_DECVIDEO, MSGL_V, "INFO: libavcodec init OK!\n");
    return 1; //mpcodecs_config_vo(sh, sh->disp_w, sh->disp_h, IMGFMT_YV12);
}
// Entry point of the capture/encode/stream demo: opens a camera input
// (DirectShow/VFW on Windows, video4linux2 on Linux), sets up the decoder and
// an SDL surface, and loads a TTF font for on-screen timestamp rendering.
// NOTE(review): this function is truncated in this chunk; the body continues
// past the last visible line.
int main(int argc, char* argv[])
{
	AVFormatContext *pFormatCtx;
	AVFormatContext *pFormatCtx_o;     // _o suffix: output-related objects
	AVFormatContext *pFormatCtx_test;  // _test suffix: external file input; its header info is needed when streaming
	unsigned int videoindex_test;
	unsigned int i, videoindex;
	unsigned int videoindex_o;
	AVCodecContext *pCodecCtx;
	AVCodecContext *pCodecCtx_o;
	AVCodec *pCodec;
	AVCodec *pCodec_o;
	AVPacket pkt_o;
	AVStream *video_st_o;
	unsigned int frame_cnt;
	AVOutputFormat *fmt_o;
	AVFrame *pFrame_o;
	unsigned int picture_size;
	uint8_t *picture_buf;
	ofstream error_log("error_log.log");
	unsigned int start=0;
	unsigned int start1=0;
	unsigned int j=1;                  // j tracks the packet pts counter
	HANDLE Handle_encode=NULL;         // H.264 encoding thread
	HANDLE Handle_flushing=NULL;       // thread that flushes buffered video data
	HANDLE Handle_dis=NULL;            // display refresh thread

	av_register_all();
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();
	//Open File
	//char filepath[]="src01_480x272_22.h265";
	//avformat_open_input(&pFormatCtx,filepath,NULL,NULL)

	//Register Device
	avdevice_register_all();           // register libavdevice capture backends
	//Windows
#ifdef _WIN32
	//Show Dshow Device
	show_dshow_device();
	//Show Device Options
	show_dshow_device_option();
	//Show VFW Options
	show_vfw_device();
#if USE_DSHOW
	AVInputFormat *ifmt=av_find_input_format("dshow");
	AVDictionary* options = NULL;
	av_dict_set(&options,"video_size","320x240",0);   // set/override the capture resolution
	//av_set_options_string();
	//Set own video device's name
	if(avformat_open_input(&pFormatCtx,"video=QuickCam Orbit/Sphere AF",ifmt,&options)!=0){
		printf("Couldn't open input stream.(无法打开输入流)\n");
		return -1;
	}
#else
	AVInputFormat *ifmt=av_find_input_format("vfwcap");
	if(avformat_open_input(&pFormatCtx,"0",ifmt,NULL)!=0){
		printf("Couldn't open input stream.(无法打开输入流)\n");
		return -1;
	}
#endif
#endif
	//Linux
#ifdef linux
	AVInputFormat *ifmt=av_find_input_format("video4linux2");
	if(avformat_open_input(&pFormatCtx,"/dev/video0",ifmt,NULL)!=0){
		printf("Couldn't open input stream.(无法打开输入流)\n");
		return -1;
	}
#endif
	if(avformat_find_stream_info(pFormatCtx,NULL)<0)
	{
		printf("Couldn't find stream information.(无法获取流信息)\n");
		return -1;
	}
	// Locate the first video stream in the capture context.
	videoindex=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++)
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
		{
			videoindex=i;
			break;
		}
	if(videoindex==-1)
	{
		printf("Couldn't find a video stream.(没有找到视频流)\n");
		return -1;
	}
	pCodecCtx=pFormatCtx->streams[videoindex]->codec;
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL)
	{
		printf("Codec not found.(没有找到解码器)\n");
		return -1;
	}
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
	{
		printf("Could not open codec.(无法打开解码器)\n");
		return -1;
	}
	// Raw decoded frame plus a YUV420P conversion target backed by out_buffer.
	AVFrame *pFrame,*pFrameYUV;
	pFrame=avcodec_alloc_frame();
	pFrameYUV=avcodec_alloc_frame();
	uint8_t *out_buffer=(uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
	avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
	//SDL----------------------------
	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
		printf( "Could not initialize SDL - %s\n", SDL_GetError());
		return -1;
	}
	int screen_w=0;
	int screen_h=0;
	SDL_Surface *screen;
	//////////////////////////////////////////////////////////////////////////
	// 2015-05-29: feature added to render the current time onto the YUV frames.
	SDL_Surface *pText;
	TTF_Font *font;
	char szTime[32]={"hello world"};
	// Initialize the SDL_ttf font library.
	if (TTF_Init()==-1)
	{
		cout<<"初始化字库失败!"<<endl;
		return 0;
	}
	/* Open the font file at point size 20. */
	font=TTF_OpenFont("C:\\Windows\\Fonts\\Arial.ttf",20);
	if (font==NULL)
	{
		cout<<"error load C:\\Windows\Fonts\\Arial.ttf,please make sure it is existed"<<endl;
		return 0;
	}
// Configure and open the audio encoder for the chosen export sub-format.
// Reads per-format settings from gPrefs, may ask the user to resample the
// project if the codec rejects the project rate, and finally opens the codec
// and allocates the raw-sample FIFO plus its read-out buffer.
// Returns false when the user declines resampling, the codec is unavailable,
// or the codec cannot be opened.
bool ExportFFmpeg::InitCodecs(AudacityProject *project)
{
   AVCodec *codec = NULL;
   AVDictionary *options = NULL;

   // Configure the audio stream's codec context.
   mEncAudioCodecCtx = mEncAudioStream->codec;

   mEncAudioCodecCtx->codec_id = ExportFFmpegOptions::fmts[mSubFormat].codecid;
   mEncAudioCodecCtx->codec_type = AVMEDIA_TYPE_AUDIO;
   mEncAudioCodecCtx->codec_tag = av_codec_get_tag((const AVCodecTag **)mEncFormatCtx->oformat->codec_tag,mEncAudioCodecCtx->codec_id);
   mSampleRate = (int)project->GetRate();
   mEncAudioCodecCtx->global_quality = -99999; //quality mode is off by default;

   // Each export type has its own settings
   switch (mSubFormat)
   {
   case FMT_M4A:
      // AAC: fixed per-channel bitrate, LC profile, quality from prefs.
      mEncAudioCodecCtx->bit_rate = 98000;
      mEncAudioCodecCtx->bit_rate *= mChannels;
      mEncAudioCodecCtx->profile = FF_PROFILE_AAC_LOW;
      mEncAudioCodecCtx->cutoff = 0;
      mEncAudioCodecCtx->global_quality = gPrefs->Read(wxT("/FileFormats/AACQuality"),-99999);
      if (!CheckSampleRate(mSampleRate, ExportFFmpegOptions::iAACSampleRates[0], ExportFFmpegOptions::iAACSampleRates[11], &ExportFFmpegOptions::iAACSampleRates[0]))
      {
         mSampleRate = AskResample(mEncAudioCodecCtx->bit_rate,mSampleRate, ExportFFmpegOptions::iAACSampleRates[0], ExportFFmpegOptions::iAACSampleRates[11], &ExportFFmpegOptions::iAACSampleRates[0]);
      }
      break;
   case FMT_AC3:
      mEncAudioCodecCtx->bit_rate = gPrefs->Read(wxT("/FileFormats/AC3BitRate"), 192000);
      if (!CheckSampleRate(mSampleRate,ExportFFmpegAC3Options::iAC3SampleRates[0], ExportFFmpegAC3Options::iAC3SampleRates[2], &ExportFFmpegAC3Options::iAC3SampleRates[0]))
         mSampleRate = AskResample(mEncAudioCodecCtx->bit_rate,mSampleRate, ExportFFmpegAC3Options::iAC3SampleRates[0], ExportFFmpegAC3Options::iAC3SampleRates[2], &ExportFFmpegAC3Options::iAC3SampleRates[0]);
      break;
   case FMT_AMRNB:
      // AMR-NB is defined only at 8 kHz.
      mSampleRate = 8000;
      mEncAudioCodecCtx->bit_rate = gPrefs->Read(wxT("/FileFormats/AMRNBBitRate"), 12200);
      break;
   case FMT_WMA2:
      mEncAudioCodecCtx->bit_rate = gPrefs->Read(wxT("/FileFormats/WMABitRate"), 198000);
      if (!CheckSampleRate(mSampleRate,ExportFFmpegWMAOptions::iWMASampleRates[0], ExportFFmpegWMAOptions::iWMASampleRates[4], &ExportFFmpegWMAOptions::iWMASampleRates[0]))
         mSampleRate = AskResample(mEncAudioCodecCtx->bit_rate,mSampleRate, ExportFFmpegWMAOptions::iWMASampleRates[0], ExportFFmpegWMAOptions::iWMASampleRates[4], &ExportFFmpegWMAOptions::iWMASampleRates[0]);
      break;
   case FMT_OTHER:
      // Custom export: every knob comes straight from the preferences.
      av_dict_set(&mEncAudioStream->metadata, "language", gPrefs->Read(wxT("/FileFormats/FFmpegLanguage"),wxT("")).ToUTF8(), 0);
      mEncAudioCodecCtx->sample_rate = gPrefs->Read(wxT("/FileFormats/FFmpegSampleRate"),(long)0);
      if (mEncAudioCodecCtx->sample_rate != 0) mSampleRate = mEncAudioCodecCtx->sample_rate;
      mEncAudioCodecCtx->bit_rate = gPrefs->Read(wxT("/FileFormats/FFmpegBitRate"), (long)0);
      // Copies up to 4 tag characters directly into the codec_tag integer.
      strncpy((char *)&mEncAudioCodecCtx->codec_tag,gPrefs->Read(wxT("/FileFormats/FFmpegTag"),wxT("")).mb_str(wxConvUTF8),4);
      mEncAudioCodecCtx->global_quality = gPrefs->Read(wxT("/FileFormats/FFmpegQuality"),(long)-99999);
      mEncAudioCodecCtx->cutoff = gPrefs->Read(wxT("/FileFormats/FFmpegCutOff"),(long)0);
      mEncAudioCodecCtx->flags2 = 0;
      if (gPrefs->Read(wxT("/FileFormats/FFmpegBitReservoir"),true))
         av_dict_set(&options, "reservoir", "1", 0);
      if (gPrefs->Read(wxT("/FileFormats/FFmpegVariableBlockLen"),true))
         // NOTE(review): raw flag bit 0x0004 — meaning codec-specific; confirm.
         mEncAudioCodecCtx->flags2 |= 0x0004; //WMA only?
      mEncAudioCodecCtx->compression_level = gPrefs->Read(wxT("/FileFormats/FFmpegCompLevel"),-1);
      mEncAudioCodecCtx->frame_size = gPrefs->Read(wxT("/FileFormats/FFmpegFrameSize"),(long)0);

      //FIXME The list of supported options for the seleced encoder should be extracted instead of a few hardcoded
      set_dict_int(&options, "lpc_coeff_precision", gPrefs->Read(wxT("/FileFormats/FFmpegLPCCoefPrec"),(long)0));
      set_dict_int(&options, "min_prediction_order", gPrefs->Read(wxT("/FileFormats/FFmpegMinPredOrder"),(long)-1));
      set_dict_int(&options, "max_prediction_order", gPrefs->Read(wxT("/FileFormats/FFmpegMaxPredOrder"),(long)-1));
      set_dict_int(&options, "min_partition_order", gPrefs->Read(wxT("/FileFormats/FFmpegMinPartOrder"),(long)-1));
      set_dict_int(&options, "max_partition_order", gPrefs->Read(wxT("/FileFormats/FFmpegMaxPartOrder"),(long)-1));
      set_dict_int(&options, "prediction_order_method", gPrefs->Read(wxT("/FileFormats/FFmpegPredOrderMethod"),(long)0));
      set_dict_int(&options, "muxrate", gPrefs->Read(wxT("/FileFormats/FFmpegMuxRate"),(long)0));
      mEncFormatCtx->packet_size = gPrefs->Read(wxT("/FileFormats/FFmpegPacketSize"),(long)0);
      codec = avcodec_find_encoder_by_name(gPrefs->Read(wxT("/FileFormats/FFmpegCodec")).ToUTF8());
      if (!codec)
         mEncAudioCodecCtx->codec_id = mEncFormatDesc->audio_codec;
      break;
   default:
      return false;
   }

   // This happens if user refused to resample the project
   if (mSampleRate == 0) return false;

   // Translate the quality preference into FFmpeg's lambda-scaled QSCALE.
   if (mEncAudioCodecCtx->global_quality >= 0)
   {
      mEncAudioCodecCtx->flags |= CODEC_FLAG_QSCALE;
   }
   else mEncAudioCodecCtx->global_quality = 0;
   mEncAudioCodecCtx->global_quality = mEncAudioCodecCtx->global_quality * FF_QP2LAMBDA;
   mEncAudioCodecCtx->sample_rate = mSampleRate;
   mEncAudioCodecCtx->channels = mChannels;
   mEncAudioCodecCtx->time_base.num = 1;
   mEncAudioCodecCtx->time_base.den = mEncAudioCodecCtx->sample_rate;
   mEncAudioCodecCtx->sample_fmt = AV_SAMPLE_FMT_S16;
   mEncAudioCodecCtx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

   if (mEncAudioCodecCtx->codec_id == AV_CODEC_ID_AC3)
   {
      // As of Jan 4, 2011, the default AC3 encoder only accept SAMPLE_FMT_FLT samples.
      // But, currently, Audacity only supports SAMPLE_FMT_S16.  So, for now, look for the
      // "older" AC3 codec.  this is not a proper solution, but will suffice until other
      // encoders no longer support SAMPLE_FMT_S16.
      codec = avcodec_find_encoder_by_name("ac3_fixed");
   }

   if (!codec)
   {
      codec = avcodec_find_encoder(mEncAudioCodecCtx->codec_id);
   }

   // Is the required audio codec compiled into libavcodec?
   if (codec == NULL)
   {
      wxMessageBox(wxString::Format(_("FFmpeg cannot find audio codec 0x%x.\nSupport for this codec is probably not compiled in."), (unsigned int) mEncAudioCodecCtx->codec_id), _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
      return false;
   }

   // Pick a sample format the encoder supports, preferring S16 when offered
   // (the loop breaks early on S16/S16P; otherwise the last acceptable
   // format in the codec's list wins).
   if (codec->sample_fmts) {
      for (int i=0; codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) {
         enum AVSampleFormat fmt = codec->sample_fmts[i];
         if (   fmt == AV_SAMPLE_FMT_U8
             || fmt == AV_SAMPLE_FMT_U8P
             || fmt == AV_SAMPLE_FMT_S16
             || fmt == AV_SAMPLE_FMT_S16P
             || fmt == AV_SAMPLE_FMT_S32
             || fmt == AV_SAMPLE_FMT_S32P
             || fmt == AV_SAMPLE_FMT_FLT
             || fmt == AV_SAMPLE_FMT_FLTP) {
            mEncAudioCodecCtx->sample_fmt = fmt;
         }
         if (   fmt == AV_SAMPLE_FMT_S16
             || fmt == AV_SAMPLE_FMT_S16P)
            break;
      }
   }

   if (mEncFormatCtx->oformat->flags & AVFMT_GLOBALHEADER)
   {
      mEncAudioCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
      // NOTE(review): ORing a CODEC_FLAG_* value into AVFormatContext.flags
      // (which uses the AVFMT_FLAG_* namespace) looks wrong; confirm against
      // upstream before touching — behavior preserved here.
      mEncFormatCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
   }

   // Open the codec.
   if (avcodec_open2(mEncAudioCodecCtx, codec, &options) < 0)
   {
      wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Can't open audio codec 0x%x."),mEncAudioCodecCtx->codec_id), _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
      return false;
   }

   default_frame_size = mEncAudioCodecCtx->frame_size;
   if (default_frame_size == 0)
      default_frame_size = 1024; // arbitrary non zero value;

   wxLogDebug(wxT("FFmpeg : Audio Output Codec Frame Size: %d samples."), mEncAudioCodecCtx->frame_size);

   // The encoder may require a minimum number of raw audio samples for each encoding but we can't
   // guarantee we'll get this minimum each time an audio frame is decoded from the input file so
   // we use a FIFO to store up incoming raw samples until we have enough for one call to the codec.
   mEncAudioFifo = av_fifo_alloc(1024);

   mEncAudioFifoOutBufSiz = 2*MAX_AUDIO_PACKET_SIZE;
   // Allocate a buffer to read OUT of the FIFO into. The FIFO maintains its own buffer internally.
   if ((mEncAudioFifoOutBuf = (uint8_t*)av_malloc(mEncAudioFifoOutBufSiz)) == NULL)
   {
      wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Can't allocate buffer to read into from audio FIFO.")), _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
      return false;
   }

   return true;
}
// Open the audio encoder. When no codec name is configured, the context
// previously copied from the muxer is opened as-is; otherwise a fresh context
// is allocated for the named codec, invalid user format fields are replaced
// with the codec's first supported values, and the encode buffer is sized
// from the negotiated frame size. Returns false on any failure.
bool AudioEncoderFFmpegPrivate::open()
{
    if (codec_name.isEmpty()) {
        // copy ctx from muxer by copyAVCodecContext
        AVCodec *codec = avcodec_find_encoder(avctx->codec_id);
        AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
        return true;
    }
    // Resolve the codec by name first, then fall back to the descriptor table
    // (handles aliases that name a codec id rather than an encoder).
    AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData());
    if (!codec) {
        const AVCodecDescriptor* cd = avcodec_descriptor_get_by_name(codec_name.toUtf8().constData());
        if (cd) {
            codec = avcodec_find_encoder(cd->id);
        }
    }
    if (!codec) {
        qWarning() << "Can not find encoder for codec " << codec_name;
        return false;
    }
    // Drop any previously allocated context before building a new one.
    if (avctx) {
        avcodec_free_context(&avctx);
        avctx = 0;
    }
    avctx = avcodec_alloc_context3(codec);

    // reset format_used to user defined format. important to update default format if format is invalid
    format_used = format;
    if (format.sampleRate() <= 0) {
        if (codec->supported_samplerates) {
            qDebug("use first supported sample rate: %d", codec->supported_samplerates[0]);
            format_used.setSampleRate(codec->supported_samplerates[0]);
        } else {
            qWarning("sample rate and supported sample rate are not set. use 44100");
            format_used.setSampleRate(44100);
        }
    }
    if (format.sampleFormat() == AudioFormat::SampleFormat_Unknown) {
        if (codec->sample_fmts) {
            qDebug("use first supported sample format: %d", codec->sample_fmts[0]);
            format_used.setSampleFormatFFmpeg((int)codec->sample_fmts[0]);
        } else {
            qWarning("sample format and supported sample format are not set. use s16");
            format_used.setSampleFormat(AudioFormat::SampleFormat_Signed16);
        }
    }
    if (format.channelLayout() == AudioFormat::ChannelLayout_Unsupported) {
        if (codec->channel_layouts) {
            qDebug("use first supported channel layout: %lld", codec->channel_layouts[0]);
            format_used.setChannelLayoutFFmpeg((qint64)codec->channel_layouts[0]);
        } else {
            qWarning("channel layout and supported channel layout are not set. use stereo");
            format_used.setChannelLayout(AudioFormat::ChannelLayout_Stereo);
        }
    }
    // Push the (possibly corrected) format into the codec context.
    avctx->sample_fmt = (AVSampleFormat)format_used.sampleFormatFFmpeg();
    avctx->channel_layout = format_used.channelLayoutFFmpeg();
    avctx->channels = format_used.channels();
    avctx->sample_rate = format_used.sampleRate();
    avctx->bits_per_raw_sample = format_used.bytesPerSample()*8;

    /// set the time base. TODO
    // Time base = 1/sample_rate, i.e. pts counted in samples.
    avctx->time_base.num = 1;
    avctx->time_base.den = format_used.sampleRate();

    avctx->bit_rate = bit_rate;
    qDebug() << format_used;
    // Permit experimental encoders (native aac, vorbis).
    av_dict_set(&dict, "strict", "-2", 0); //aac, vorbis
    applyOptionsForContext();
    // avctx->frame_size will be set in avcodec_open2
    AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);

    // from mpv ao_lavc
    // PCM-style codecs report frame_size <= 1; pick a large fixed frame size
    // and size the buffer from bytes-per-sample instead.
    int pcm_hack = 0;
    int buffer_size = 0;
    frame_size = avctx->frame_size;
    if (frame_size <= 1)
        pcm_hack = av_get_bits_per_sample(avctx->codec_id)/8;
    if (pcm_hack) {
        frame_size = 16384; // "enough"
        buffer_size = frame_size*pcm_hack*format_used.channels()*2+200;
    } else {
        buffer_size = frame_size*format_used.bytesPerSample()*format_used.channels()*2+200;
    }
    if (buffer_size < FF_MIN_BUFFER_SIZE)
        buffer_size = FF_MIN_BUFFER_SIZE;
    buffer.resize(buffer_size);
    return true;
}
/**
 * Handle an incoming RTSP SETUP request (listen/record mode).
 *
 * Validates the requested transports (mode=record over UDP or TCP only),
 * matches the control URL against our known streams, then either sets up
 * interleaved TCP transport or probes local UDP ports in
 * [rt->rtp_port_min, rt->rtp_port_max] (RTP/RTCP pairs, step 2) for the RTP
 * handle. On success, replies 200 OK with the negotiated Transport header
 * and a session id, and moves the session to the PAUSED state.
 *
 * Returns 0 on success or a negative AVERROR code; transport-level failures
 * are also reported to the peer via rtsp_send_reply(RTSP_STATUS_TRANSPORT).
 */
static int rtsp_read_setup(AVFormatContext *s, char* host, char *controlurl)
{
    RTSPState *rt             = s->priv_data;
    RTSPMessageHeader request = { 0 };
    int ret                   = 0;
    char url[1024];
    RTSPStream *rtsp_st;
    char responseheaders[1024];
    int localport    = -1;
    int transportidx = 0;
    int streamid     = 0;

    ret = rtsp_read_request(s, &request, "SETUP");
    if (ret)
        return ret;
    rt->seq++;
    if (!request.nb_transports) {
        av_log(s, AV_LOG_ERROR, "No transport defined in SETUP\n");
        return AVERROR_INVALIDDATA;
    }
    /* Only mode=record/receive over UDP or TCP is supported. */
    for (transportidx = 0; transportidx < request.nb_transports;
         transportidx++) {
        if (!request.transports[transportidx].mode_record ||
            (request.transports[transportidx].lower_transport !=
                 RTSP_LOWER_TRANSPORT_UDP &&
             request.transports[transportidx].lower_transport !=
                 RTSP_LOWER_TRANSPORT_TCP)) {
            av_log(s, AV_LOG_ERROR, "mode=record/receive not set or transport"
                   " protocol not supported (yet)\n");
            return AVERROR_INVALIDDATA;
        }
    }
    if (request.nb_transports > 1)
        av_log(s, AV_LOG_WARNING, "More than one transport not supported, "
               "using first of all\n");
    /* Find the stream whose control URL the client is setting up. */
    for (streamid = 0; streamid < rt->nb_rtsp_streams; streamid++) {
        if (!strcmp(rt->rtsp_streams[streamid]->control_url,
                    controlurl))
            break;
    }
    if (streamid == rt->nb_rtsp_streams) {
        av_log(s, AV_LOG_ERROR, "Unable to find requested track\n");
        return AVERROR_STREAM_NOT_FOUND;
    }
    rtsp_st   = rt->rtsp_streams[streamid];
    localport = rt->rtp_port_min;

    if (request.transports[0].lower_transport == RTSP_LOWER_TRANSPORT_TCP) {
        rt->lower_transport = RTSP_LOWER_TRANSPORT_TCP;
        if ((ret = ff_rtsp_open_transport_ctx(s, rtsp_st))) {
            rtsp_send_reply(s, RTSP_STATUS_TRANSPORT, NULL, request.seq);
            return ret;
        }
        rtsp_st->interleaved_min = request.transports[0].interleaved_min;
        rtsp_st->interleaved_max = request.transports[0].interleaved_max;
        snprintf(responseheaders, sizeof(responseheaders), "Transport: "
                 "RTP/AVP/TCP;unicast;mode=receive;interleaved=%d-%d"
                 "\r\n", request.transports[0].interleaved_min,
                 request.transports[0].interleaved_max);
    } else {
        do {
            AVDictionary *opts = NULL;
            char buf[256];
            snprintf(buf, sizeof(buf), "%d", rt->buffer_size);
            av_dict_set(&opts, "buffer_size", buf, 0);
            ff_url_join(url, sizeof(url), "rtp", NULL, host, localport, NULL);
            av_log(s, AV_LOG_TRACE, "Opening: %s", url);
            ret = ffurl_open_whitelist(&rtsp_st->rtp_handle, url,
                                       AVIO_FLAG_READ_WRITE,
                                       &s->interrupt_callback, &opts,
                                       s->protocol_whitelist,
                                       s->protocol_blacklist, NULL);
            av_dict_free(&opts);
            if (ret)
                localport += 2; /* RTP/RTCP pair: advance by two ports */
        /* BUGFIX: was `while (ret || localport > rt->rtp_port_max)`, which on
         * persistent open failure kept probing ports beyond rtp_port_max
         * forever (the condition stayed true once localport exceeded the
         * maximum) and made the exhaustion check below unreachable. Retry
         * only while the open failed AND ports remain in the allowed range. */
        } while (ret && localport <= rt->rtp_port_max);
        if (localport > rt->rtp_port_max) {
            /* Port range exhausted without a successful open. */
            rtsp_send_reply(s, RTSP_STATUS_TRANSPORT, NULL, request.seq);
            return ret;
        }
        av_log(s, AV_LOG_TRACE, "Listening on: %d",
               ff_rtp_get_local_rtp_port(rtsp_st->rtp_handle));
        if ((ret = ff_rtsp_open_transport_ctx(s, rtsp_st))) {
            rtsp_send_reply(s, RTSP_STATUS_TRANSPORT, NULL, request.seq);
            return ret;
        }
        localport = ff_rtp_get_local_rtp_port(rtsp_st->rtp_handle);
        snprintf(responseheaders, sizeof(responseheaders), "Transport: "
                 "RTP/AVP/UDP;unicast;mode=receive;source=%s;"
                 "client_port=%d-%d;server_port=%d-%d\r\n",
                 host, request.transports[0].client_port_min,
                 request.transports[0].client_port_max, localport,
                 localport + 1);
    }

    /* Establish sessionid if not previously set */
    /* Put this in a function? */
    /* RFC 2326: session id must be at least 8 digits */
    while (strlen(rt->session_id) < 8)
        av_strlcatf(rt->session_id, 512, "%u", av_get_random_seed());

    av_strlcatf(responseheaders, sizeof(responseheaders), "Session: %s\r\n",
                rt->session_id);
    /* Send Reply */
    rtsp_send_reply(s, RTSP_STATUS_OK, responseheaders, request.seq);

    rt->state = RTSP_STATE_PAUSED;
    return 0;
}
/**
 * MEncoder libavcodec video-encoder filter config hook.
 *
 * Called once the output dimensions are known. Copies the many
 * lavc_param_* option globals into the encoder context
 * (lavc_venc_context), updates the muxer stream header (mux_v->bih),
 * parses the intra/inter quant matrices, rate-control override string,
 * aspect ratio and two-pass settings, then opens the encoder with
 * avcodec_open2(). Returns 1 on success, 0 on any error.
 *
 * NOTE: relies on file-scope state (mux_v, lavc_venc_context,
 * stats_file, all lavc_param_* variables) — not reentrant.
 */
static int config(struct vf_instance *vf,
        int width, int height, int d_width, int d_height,
        unsigned int flags, unsigned int outfmt){
    int size, i;
    char *p;
    AVDictionary *opts = NULL;

    /* Propagate the final picture size into the BITMAPINFOHEADER the
     * muxer writes. */
    mux_v->bih->biWidth=width;
    mux_v->bih->biHeight=height;
    mux_v->bih->biSizeImage=mux_v->bih->biWidth*mux_v->bih->biHeight*(mux_v->bih->biBitCount/8);

    mp_msg(MSGT_MENCODER, MSGL_INFO,"videocodec: libavcodec (%dx%d fourcc=%x [%.4s])\n",
        mux_v->bih->biWidth, mux_v->bih->biHeight,
        mux_v->bih->biCompression, (char *)&mux_v->bih->biCompression);

    lavc_venc_context->width = width;
    lavc_venc_context->height = height;

    /* vbitrate: values > 16000 are taken as bits/s, smaller non-negative
     * values as kbits/s; negative means "use the 800 kbit/s default". */
    if (lavc_param_vbitrate > 16000) /* != -1 */
        lavc_venc_context->bit_rate = lavc_param_vbitrate;
    else if (lavc_param_vbitrate >= 0) /* != -1 */
        lavc_venc_context->bit_rate = lavc_param_vbitrate*1000;
    else
        lavc_venc_context->bit_rate = 800000; /* default */
    mux_v->avg_rate= lavc_venc_context->bit_rate;

    lavc_venc_context->bit_rate_tolerance= lavc_param_vrate_tolerance*1000;
    lavc_venc_context->time_base= (AVRational){mux_v->h.dwScale, mux_v->h.dwRate};

    /* Quantizer limits; lambda values are converted from QP scale with
     * FF_QP2LAMBDA and rounded to nearest. */
    lavc_venc_context->qmin= lavc_param_vqmin;
    lavc_venc_context->qmax= lavc_param_vqmax;
    lavc_venc_context->lmin= (int)(FF_QP2LAMBDA * lavc_param_lmin + 0.5);
    lavc_venc_context->lmax= (int)(FF_QP2LAMBDA * lavc_param_lmax + 0.5);
    lavc_venc_context->mb_lmin= (int)(FF_QP2LAMBDA * lavc_param_mb_lmin + 0.5);
    lavc_venc_context->mb_lmax= (int)(FF_QP2LAMBDA * lavc_param_mb_lmax + 0.5);
    lavc_venc_context->max_qdiff= lavc_param_vqdiff;
    lavc_venc_context->qcompress= lavc_param_vqcompress;
    lavc_venc_context->qblur= lavc_param_vqblur;
    lavc_venc_context->max_b_frames= lavc_param_vmax_b_frames;
    lavc_venc_context->b_quant_factor= lavc_param_vb_qfactor;
    lavc_venc_context->rc_strategy= lavc_param_vrc_strategy;
    lavc_venc_context->b_frame_strategy= lavc_param_vb_strategy;
    lavc_venc_context->b_quant_offset= (int)(FF_QP2LAMBDA * lavc_param_vb_qoffset + 0.5);
    lavc_venc_context->rtp_payload_size= lavc_param_packet_size;
    lavc_venc_context->strict_std_compliance= lavc_param_strict;
    lavc_venc_context->i_quant_factor= lavc_param_vi_qfactor;
    lavc_venc_context->i_quant_offset= (int)(FF_QP2LAMBDA * lavc_param_vi_qoffset + 0.5);

    /* Rate-control tuning knobs (values in kbit converted to bits). */
    lavc_venc_context->rc_qsquish= lavc_param_rc_qsquish;
    lavc_venc_context->rc_qmod_amp= lavc_param_rc_qmod_amp;
    lavc_venc_context->rc_qmod_freq= lavc_param_rc_qmod_freq;
    lavc_venc_context->rc_eq= lavc_param_rc_eq;
    mux_v->max_rate= lavc_venc_context->rc_max_rate= lavc_param_rc_max_rate*1000;
    lavc_venc_context->rc_min_rate= lavc_param_rc_min_rate*1000;
    mux_v->vbv_size= lavc_venc_context->rc_buffer_size= lavc_param_rc_buffer_size*1000;
    lavc_venc_context->rc_initial_buffer_occupancy=
            lavc_venc_context->rc_buffer_size *
            lavc_param_rc_initial_buffer_occupancy;
    lavc_venc_context->rc_buffer_aggressivity= lavc_param_rc_buffer_aggressivity;
    lavc_venc_context->rc_initial_cplx= lavc_param_rc_initial_cplx;
    lavc_venc_context->debug= lavc_param_debug;
    lavc_venc_context->last_predictor_count= lavc_param_last_pred;
    lavc_venc_context->pre_me= lavc_param_pre_me;
    lavc_venc_context->me_pre_cmp= lavc_param_me_pre_cmp;
    lavc_venc_context->pre_dia_size= lavc_param_pre_dia_size;
    lavc_venc_context->me_subpel_quality= lavc_param_me_subpel_quality;
    lavc_venc_context->me_range= lavc_param_me_range;
    lavc_venc_context->intra_quant_bias= lavc_param_ibias;
    lavc_venc_context->inter_quant_bias= lavc_param_pbias;
    lavc_venc_context->coder_type= lavc_param_coder;
    lavc_venc_context->context_model= lavc_param_context;
    lavc_venc_context->scenechange_threshold= lavc_param_sc_threshold;
    lavc_venc_context->noise_reduction= lavc_param_noise_reduction;
    lavc_venc_context->nsse_weight= lavc_param_nssew;
    lavc_venc_context->frame_skip_threshold= lavc_param_skip_threshold;
    lavc_venc_context->frame_skip_factor= lavc_param_skip_factor;
    lavc_venc_context->frame_skip_exp= lavc_param_skip_exp;
    lavc_venc_context->frame_skip_cmp= lavc_param_skip_cmp;

    /* User-supplied intra quant matrix: comma-separated list of exactly
     * 64 ints; anything else frees the matrix and falls back to default. */
    if (lavc_param_intra_matrix)
    {
        char *tmp;

        lavc_venc_context->intra_matrix =
            av_malloc(sizeof(*lavc_venc_context->intra_matrix)*64);

        i = 0;
        while ((tmp = strsep(&lavc_param_intra_matrix, ",")) && (i < 64))
        {
            if (!tmp || (tmp && !strlen(tmp)))
                break;
            lavc_venc_context->intra_matrix[i++] = atoi(tmp);
        }

        if (i != 64)
            av_freep(&lavc_venc_context->intra_matrix);
        else
            mp_msg(MSGT_MENCODER, MSGL_V, "Using user specified intra matrix\n");
    }
    /* Same parsing for the inter quant matrix. */
    if (lavc_param_inter_matrix)
    {
        char *tmp;

        lavc_venc_context->inter_matrix =
            av_malloc(sizeof(*lavc_venc_context->inter_matrix)*64);

        i = 0;
        while ((tmp = strsep(&lavc_param_inter_matrix, ",")) && (i < 64))
        {
            if (!tmp || (tmp && !strlen(tmp)))
                break;
            lavc_venc_context->inter_matrix[i++] = atoi(tmp);
        }

        if (i != 64)
            av_freep(&lavc_venc_context->inter_matrix);
        else
            mp_msg(MSGT_MENCODER, MSGL_V, "Using user specified inter matrix\n");
    }

    /* vrc_override string: "start,end,q" records separated by '/'.
     * q > 0 means a fixed qscale; q <= 0 means a quality factor of -q%. */
    p= lavc_param_rc_override_string;
    for(i=0; p; i++){
        int start, end, q;
        int e=sscanf(p, "%d,%d,%d", &start, &end, &q);
        if(e!=3){
            mp_msg(MSGT_MENCODER,MSGL_ERR,"error parsing vrc_q\n");
            return 0;
        }
        lavc_venc_context->rc_override=
            realloc(lavc_venc_context->rc_override, sizeof(RcOverride)*(i+1));
        lavc_venc_context->rc_override[i].start_frame= start;
        lavc_venc_context->rc_override[i].end_frame = end;
        if(q>0){
            lavc_venc_context->rc_override[i].qscale= q;
            lavc_venc_context->rc_override[i].quality_factor= 1.0;
        }
        else{
            lavc_venc_context->rc_override[i].qscale= 0;
            lavc_venc_context->rc_override[i].quality_factor= -q/100.0;
        }
        p= strchr(p, '/');
        if(p) p++;
    }
    lavc_venc_context->rc_override_count=i;

    lavc_venc_context->mpeg_quant=lavc_param_mpeg_quant;

    lavc_venc_context->dct_algo= lavc_param_fdct;
    lavc_venc_context->idct_algo= lavc_param_idct;

    lavc_venc_context->lumi_masking= lavc_param_lumi_masking;
    lavc_venc_context->temporal_cplx_masking= lavc_param_temporal_cplx_masking;
    lavc_venc_context->spatial_cplx_masking= lavc_param_spatial_cplx_masking;
    lavc_venc_context->p_masking= lavc_param_p_masking;
    lavc_venc_context->dark_masking= lavc_param_dark_masking;
    lavc_venc_context->border_masking = lavc_param_border_masking;

    /* Aspect ratio: accept "w/h" or a plain float; only ratios in
     * (0.1, 10.0) are considered sane. The stored sample_aspect_ratio
     * is the pixel aspect derived from the display aspect. */
    if (lavc_param_aspect != NULL)
    {
        int par_width, par_height, e;
        float ratio=0;

        e= sscanf (lavc_param_aspect, "%d/%d", &par_width, &par_height);
        if(e==2){
            if(par_height)
                ratio= (float)par_width / (float)par_height;
        }else{
            e= sscanf (lavc_param_aspect, "%f", &ratio);
        }

        if (e && ratio > 0.1 && ratio < 10.0) {
            lavc_venc_context->sample_aspect_ratio= av_d2q(ratio * height / width, 255);
            mp_dbg(MSGT_MENCODER, MSGL_DBG2, "sample_aspect_ratio: %d/%d\n",
                lavc_venc_context->sample_aspect_ratio.num,
                lavc_venc_context->sample_aspect_ratio.den);
            mux_v->aspect = ratio;
            mp_dbg(MSGT_MENCODER, MSGL_DBG2, "aspect_ratio: %f\n", ratio);
        } else {
            mp_dbg(MSGT_MENCODER, MSGL_ERR, "aspect ratio: cannot parse \"%s\"\n", lavc_param_aspect);
            return 0;
        }
    }
    else if (lavc_param_autoaspect) {
        /* Derive pixel aspect from the display dimensions. */
        lavc_venc_context->sample_aspect_ratio =
            av_d2q((float)d_width/d_height*height / width, 255);
        mux_v->aspect = (float)d_width/d_height;
    }

    /* keyframe interval */
    if (lavc_param_keyint >= 0) /* != -1 */
        lavc_venc_context->gop_size = lavc_param_keyint;
    else
        lavc_venc_context->gop_size = 250; /* default */

    lavc_venc_context->flags = 0;
    if (lavc_param_mb_decision)
    {
        mp_msg(MSGT_MENCODER, MSGL_INFO, MSGTR_MPCODECS_HighQualityEncodingSelected);
        lavc_venc_context->mb_decision= lavc_param_mb_decision;
    }

    lavc_venc_context->me_cmp= lavc_param_me_cmp;
    lavc_venc_context->me_sub_cmp= lavc_param_me_sub_cmp;
    lavc_venc_context->mb_cmp= lavc_param_mb_cmp;
#ifdef FF_CMP_VSAD
    lavc_venc_context->ildct_cmp= lavc_param_ildct_cmp;
#endif
    lavc_venc_context->dia_size= lavc_param_dia_size;

    /* Each lavc_param_* flag below is either 0 or the matching CODEC_FLAG
     * bit, so it can be OR-ed in directly. Options removed from the codec
     * context API are passed through the options dictionary instead. */
    lavc_venc_context->flags|= lavc_param_qpel;
    lavc_venc_context->trellis = lavc_param_trell;
    lavc_venc_context->flags|= lavc_param_lowdelay;
    lavc_venc_context->flags|= lavc_param_bit_exact;
    lavc_venc_context->flags|= lavc_param_aic;
    if (lavc_param_aiv)
        av_dict_set(&opts, "aiv", "1", 0);
    if (lavc_param_umv)
        av_dict_set(&opts, "umv", "1", 0);
    if (lavc_param_obmc)
        av_dict_set(&opts, "obmc", "1", 0);
    lavc_venc_context->flags|= lavc_param_loop;
    lavc_venc_context->flags|= lavc_param_v4mv ? CODEC_FLAG_4MV : 0;
    if (lavc_param_data_partitioning)
        av_dict_set(&opts, "data_partitioning", "1", 0);
    lavc_venc_context->flags|= lavc_param_mv0;
    if (lavc_param_ss)
        av_dict_set(&opts, "structured_slices", "1", 0);
    if (lavc_param_alt)
        av_dict_set(&opts, "alternate_scan", "1", 0);
    lavc_venc_context->flags|= lavc_param_ilme;
    lavc_venc_context->flags|= lavc_param_gmc;
#ifdef CODEC_FLAG_CLOSED_GOP
    lavc_venc_context->flags|= lavc_param_closed_gop;
#endif
    lavc_venc_context->flags|= lavc_param_gray;

    if(lavc_param_normalize_aqp) lavc_venc_context->flags|= CODEC_FLAG_NORMALIZE_AQP;
    if(lavc_param_interlaced_dct) lavc_venc_context->flags|= CODEC_FLAG_INTERLACED_DCT;
    lavc_venc_context->flags|= lavc_param_psnr;
    lavc_venc_context->intra_dc_precision = lavc_param_dc_precision - 8;
    lavc_venc_context->prediction_method= lavc_param_prediction_method;
    lavc_venc_context->brd_scale = lavc_param_brd_scale;
    lavc_venc_context->bidir_refine = lavc_param_bidir_refine;
    lavc_venc_context->scenechange_factor = lavc_param_sc_factor;
    if((lavc_param_video_global_header&1)
       /*|| (video_global_header==0 && (oc->oformat->flags & AVFMT_GLOBALHEADER))*/){
        lavc_venc_context->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    if(lavc_param_video_global_header&2){
        lavc_venc_context->flags2 |= CODEC_FLAG2_LOCAL_HEADER;
    }
    lavc_venc_context->mv0_threshold = lavc_param_mv0_threshold;
    lavc_venc_context->refs = lavc_param_refs;
    lavc_venc_context->b_sensitivity = lavc_param_b_sensitivity;
    lavc_venc_context->level = lavc_param_level;

    /* Free-form "key=value,..." options parsed straight into the dict. */
    if(lavc_param_avopt){
        if(av_dict_parse_string(&opts, lavc_param_avopt, "=", ",", 0) < 0){
            mp_msg(MSGT_MENCODER,MSGL_ERR, "Your options /%s/ look like gibberish to me pal\n", lavc_param_avopt);
            return 0;
        }
    }

    mux_v->imgfmt = lavc_param_format;
    lavc_venc_context->pix_fmt = imgfmt2pixfmt(lavc_param_format);
    if (lavc_venc_context->pix_fmt == PIX_FMT_NONE)
        return 0;

    if(!stats_file) {
    /* lavc internal 2pass bitrate control */
    switch(lavc_param_vpass){
    case 2:
    case 3:
        /* Pass 2/3: read the stats produced by pass 1 into stats_in. */
        lavc_venc_context->flags|= CODEC_FLAG_PASS2;
        stats_file= fopen(passtmpfile, "rb");
        if(stats_file==NULL){
            mp_msg(MSGT_MENCODER,MSGL_ERR,"2pass failed: filename=%s\n", passtmpfile);
            return 0;
        }
        fseek(stats_file, 0, SEEK_END);
        size= ftell(stats_file);
        fseek(stats_file, 0, SEEK_SET);
        lavc_venc_context->stats_in= av_malloc(size + 1);
        lavc_venc_context->stats_in[size]=0;
        if(fread(lavc_venc_context->stats_in, size, 1, stats_file)<1){
            mp_msg(MSGT_MENCODER,MSGL_ERR,"2pass failed: reading from filename=%s\n", passtmpfile);
            return 0;
        }
        /* vpass=3 both reads old stats and writes new ones, so it falls
         * through to the pass-1 setup below after closing the read handle. */
        if(lavc_param_vpass == 2)
            break;
        else
            fclose(stats_file);
        /* fall through */
    case 1:
        /* Pass 1 (or 3): open the stats file for writing. */
        lavc_venc_context->flags|= CODEC_FLAG_PASS1;
        stats_file= fopen(passtmpfile, "wb");
        if(stats_file==NULL){
            mp_msg(MSGT_MENCODER,MSGL_ERR,"2pass failed: filename=%s\n", passtmpfile);
            return 0;
        }
        if(lavc_param_turbo && (lavc_param_vpass == 1)) {
            /* uses SAD comparison functions instead of other hungrier */
            lavc_venc_context->me_pre_cmp = 0;
            lavc_venc_context->me_cmp = 0;
            lavc_venc_context->me_sub_cmp = 0;
            lavc_venc_context->mb_cmp = 2;

            /* Disables diamond motion estimation */
            lavc_venc_context->pre_dia_size = 0;
            lavc_venc_context->dia_size = 1;

            lavc_venc_context->noise_reduction = 0; // nr=0
            lavc_venc_context->mb_decision = 0; // mbd=0 ("realtime" encoding)

            lavc_venc_context->flags &= ~CODEC_FLAG_QPEL;
            lavc_venc_context->flags &= ~CODEC_FLAG_4MV;
            lavc_venc_context->trellis = 0;
            lavc_venc_context->flags &= ~CODEC_FLAG_MV0;
            av_dict_set(&opts, "mpv_flags", "-qp_rd-cbp_rd", 0);
        }
        break;
    }
    }

    lavc_venc_context->me_method = ME_ZERO+lavc_param_vme;

    /* fixed qscale :p */
    if (lavc_param_vqscale >= 0.0)
    {
        mp_msg(MSGT_MENCODER, MSGL_INFO, MSGTR_MPCODECS_UsingConstantQscale, lavc_param_vqscale);
        lavc_venc_context->flags |= CODEC_FLAG_QSCALE;
        lavc_venc_context->global_quality=
        vf->priv->pic->quality = (int)(FF_QP2LAMBDA * lavc_param_vqscale + 0.5);
    }

    lavc_venc_context->thread_count = lavc_param_threads;
    lavc_venc_context->thread_type = FF_THREAD_FRAME | FF_THREAD_SLICE;

    if (avcodec_open2(lavc_venc_context, vf->priv->codec, &opts) != 0) {
        mp_msg(MSGT_MENCODER,MSGL_ERR,MSGTR_CantOpenCodec);
        return 0;
    }
    /* Any entries still left in the dict were not consumed by the
     * encoder — treat unknown options as a hard error. */
    if (av_dict_count(opts)) {
        AVDictionaryEntry *e = NULL;
        while ((e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)))
            mp_msg(MSGT_MENCODER,MSGL_ERR,"Unknown option %s\n", e->key);
        return 0;
    }
    av_dict_free(&opts);

    /* free second pass buffer, its not needed anymore */
    av_freep(&lavc_venc_context->stats_in);
    if(lavc_venc_context->bits_per_coded_sample)
        mux_v->bih->biBitCount= lavc_venc_context->bits_per_coded_sample;
    /* Append codec extradata (global headers) after the BITMAPINFOHEADER. */
    if(lavc_venc_context->extradata_size){
        mux_v->bih= realloc(mux_v->bih, sizeof(*mux_v->bih) + lavc_venc_context->extradata_size);
        memcpy(mux_v->bih + 1, lavc_venc_context->extradata, lavc_venc_context->extradata_size);
        mux_v->bih->biSize= sizeof(*mux_v->bih) + lavc_venc_context->extradata_size;
    }

    mux_v->decoder_delay = lavc_venc_context->max_b_frames ? 1 : 0;

    return 1;
}