void VideoOutput::initialize(const String& filename, const Settings& settings) { // helper for exiting VideoOutput construction (exceptions caught by static ref creator) #define throwException(exp, msg) if (!(exp)) { throw String(msg); } debugAssert(settings.width > 0); debugAssert(settings.height > 0); debugAssert(settings.fps > 0); #ifndef G3D_NO_FFMPEG // initialize list of available muxers/demuxers and codecs in ffmpeg av_register_all(); m_filename = filename; m_settings = settings; // see if ffmpeg can support this muxer and setup output format m_avOutputFormat = av_guess_format(NULL, filename.c_str(), NULL); throwException(m_avOutputFormat, ("Error initializing FFmpeg in guess_format.")); // set the codec id m_avOutputFormat->video_codec = static_cast<AVCodecID>(m_settings.codec); // create format context which controls writing the file m_avFormatContext = avformat_alloc_context(); throwException(m_avFormatContext, ("Error initializing FFmpeg in av_alloc_format_context.")); // attach format to context m_avFormatContext->oformat = m_avOutputFormat; strncpy(m_avFormatContext->filename, filename.c_str(), sizeof(m_avFormatContext->filename)); // add video stream 0 m_avStream = avformat_new_stream(m_avFormatContext, 0); throwException(m_avStream, ("Error initializing FFmpeg in av_new_stream.")); // setup video stream m_avStream->codec->codec_id = m_avOutputFormat->video_codec; m_avStream->codec->codec_type = AVMEDIA_TYPE_VIDEO; // find and open required codec AVCodec* codec = avcodec_find_encoder(m_avStream->codec->codec_id); throwException(codec, format("Could not find an %s (%d) encoder on this machine.", toString(static_cast<InternalCodecID>(m_avStream->codec->codec_id)), m_avStream->codec->codec_id)); // finish setting codec parameters m_avStream->codec->bit_rate = m_settings.bitrate * 10; m_avStream->time_base.den = 30; m_avStream->time_base.num = 1; m_avStream->codec->time_base.den = 30; m_avStream->codec->time_base.num = 1; m_avStream->codec->width = 
m_settings.width; m_avStream->codec->height = m_settings.height; // set codec input format if (m_settings.codec == CODEC_ID_RAWVIDEO) { m_avStream->codec->pix_fmt = ConvertImageFormatToPixelFormat(m_settings.raw.format); throwException(m_avStream->codec->pix_fmt != PIX_FMT_NONE, ("Error initializing FFmpeg setting raw video input format.")); } else { m_avStream->codec->pix_fmt = codec->pix_fmts[0]; } // set the fourcc if (m_settings.fourcc != 0) { m_avStream->codec->codec_tag = m_settings.fourcc; } // some formats want stream headers to be separate if (m_avOutputFormat->flags & AVFMT_GLOBALHEADER) { m_avStream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; } //set a bunch of obscure presets that stop ffmpeg from breaking m_avStream->codec->rc_max_rate = 0; m_avStream->codec->rc_buffer_size = 0; m_avStream->codec->gop_size = 40; m_avStream->codec->max_b_frames = 3; m_avStream->codec->b_frame_strategy = 1; m_avStream->codec->coder_type = 1; m_avStream->codec->me_cmp = 1; m_avStream->codec->me_range = 16; m_avStream->codec->qmin = 10; m_avStream->codec->qmax = 51; m_avStream->codec->scenechange_threshold = 40; m_avStream->codec->flags |= CODEC_FLAG_LOOP_FILTER; m_avStream->codec->me_method = ME_HEX; m_avStream->codec->me_subpel_quality = 5; m_avStream->codec->i_quant_factor = 0.71; m_avStream->codec->qcompress = 0.6; m_avStream->codec->max_qdiff = 4; m_avStream->codec->profile = FF_PROFILE_H264_BASELINE; int avRet = avcodec_open2(m_avStream->codec, codec, NULL); throwException(avRet >= 0, ("Error initializing FFmpeg in avcodec_open")); // create encoding buffer - just allocate largest possible for now (3 channels) m_avEncodingBufferSize = iMax(m_settings.width * m_settings.height * 3, 512 * 1024); m_avEncodingBuffer = static_cast<uint8*>(av_malloc(m_avEncodingBufferSize)); // create buffer to hold converted input frame if the codec needs a conversion int inputBufferSize = avpicture_get_size(m_avStream->codec->pix_fmt, m_settings.width, m_settings.height); m_avInputBuffer 
= static_cast<uint8*>(av_malloc(inputBufferSize)); throwException(m_avInputBuffer, ("Error initializing FFmpeg in av_malloc")); m_avInputFrame = avcodec_alloc_frame(); throwException(m_avInputFrame, ("Error initializing FFmpeg in avcodec_alloc_frame")); avpicture_fill(reinterpret_cast<AVPicture*>(m_avInputFrame), m_avInputBuffer, m_avStream->codec->pix_fmt, m_settings.width, m_settings.height); // open output file for writing using ffmpeg avRet = avio_open(&m_avFormatContext->pb, filename.c_str(), AVIO_FLAG_WRITE); throwException(avRet >= 0, ("Error opening FFmpeg video file with url_fopen")); // start the stream avRet = avformat_write_header(m_avFormatContext, NULL); // make sure the file is closed on error if (avRet < 0) { // abort closes and removes the output file abort(); throwException(false, ("Error initializing and writing FFmpeg video file.")); } #endif m_isInitialized = true; }
// http://blog.csdn.net/leixiaohua1020/article/details/25346147 int yuv_to_jpg (unsigned char * yuv, unsigned iw, unsigned ih, const char * jpg_file) { AVFormatContext * pFormatCtx; AVOutputFormat * fmt; AVStream * video_st; AVCodecContext * pCodecCtx; AVCodec * pCodec; AVFrame* picture; AVPacket pkt; unsigned y_size = 0; unsigned size = 0; int got_pic = 0; int ret = 0; av_register_all (); //Method 1 pFormatCtx = avformat_alloc_context (); //Guess format fmt = av_guess_format ("mjpeg", NULL, NULL); pFormatCtx->oformat = fmt; //Output URL if (avio_open (&pFormatCtx->pb,jpg_file, AVIO_FLAG_READ_WRITE) < 0){ printf("Couldn't open output file."); return -1; } video_st = avformat_new_stream (pFormatCtx, 0); if (video_st==NULL) return -1; pCodecCtx = video_st->codec; pCodecCtx->codec_id = fmt->video_codec; pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO; pCodecCtx->pix_fmt = AV_PIX_FMT_YUVJ420P; pCodecCtx->width = iw; pCodecCtx->height = ih; pCodecCtx->time_base.num = 1; pCodecCtx->time_base.den = 25; pCodec = avcodec_find_encoder (pCodecCtx->codec_id); if (!pCodec) { printf("Codec not found."); return -1; } if (avcodec_open2 (pCodecCtx, pCodec,NULL) < 0){ printf("Could not open codec."); return -1; } picture = av_frame_alloc (); // size = iw * ih * 3 / 2 size = avpicture_fill ((AVPicture *) picture, NULL, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height); //Read YUV y_size = pCodecCtx->width * pCodecCtx->height; picture->data[0] = yuv; // Y picture->data[1] = yuv + y_size; // U picture->data[2] = yuv + y_size * 5 / 4; // V //Write Header avformat_write_header (pFormatCtx,NULL); //Encode av_new_packet (&pkt, y_size * 3); ret = avcodec_encode_video2 (pCodecCtx, &pkt, picture, &got_pic); if(ret < 0){ printf("Encode Error.\n"); return -1; } if (got_pic==1) { pkt.stream_index = video_st->index; ret = av_write_frame (pFormatCtx, &pkt); } av_free_packet (&pkt); //Write Trailer av_write_trailer (pFormatCtx); printf ("Encode Successful.\n"); if (video_st) { avcodec_close 
(video_st->codec); av_free (picture); } avio_close (pFormatCtx->pb); avformat_free_context (pFormatCtx); return 0; }
/*
 * Opens the output container and the audio/video encoders described by
 * 'encoder', writes the container header, and returns true on success.
 * Looks up the codecs by name; the audio stream is optional (only created
 * when an audio codec name was configured and found).
 *
 * The #ifdef branches span three FFmpeg/libav API generations:
 *   - USE_LIBAV:           no avformat_alloc_output_context2()
 *   - FFMPEG_USE_CODECPAR: streams carry AVCodecParameters, contexts are
 *                          allocated separately with avcodec_alloc_context3()
 *   - FFMPEG_USE_NEW_BSF:  new AVBSFContext bitstream-filter API
 */
bool FFmpegEncoderOpen(struct FFmpegEncoder* encoder, const char* outfile) {
    AVCodec* acodec = avcodec_find_encoder_by_name(encoder->audioCodec);
    AVCodec* vcodec = avcodec_find_encoder_by_name(encoder->videoCodec);
    /* Fail fast: missing requested audio codec, missing video codec, or an
     * unsupported container/codec combination. */
    if ((encoder->audioCodec && !acodec) || !vcodec || !FFmpegEncoderVerifyContainer(encoder)) {
        return false;
    }

    /* Reset per-recording counters. */
    encoder->currentAudioSample = 0;
    encoder->currentAudioFrame = 0;
    encoder->currentVideoFrame = 0;
    encoder->nextAudioPts = 0;

    AVOutputFormat* oformat = av_guess_format(encoder->containerFormat, 0, 0);
#ifndef USE_LIBAV
    avformat_alloc_output_context2(&encoder->context, oformat, 0, outfile);
#else
    /* libav has no avformat_alloc_output_context2; set the context up by hand
     * (filename copy is explicitly NUL-terminated). */
    encoder->context = avformat_alloc_context();
    strncpy(encoder->context->filename, outfile, sizeof(encoder->context->filename) - 1);
    encoder->context->filename[sizeof(encoder->context->filename) - 1] = '\0';
    encoder->context->oformat = oformat;
#endif

    if (acodec) {
#ifdef FFMPEG_USE_CODECPAR
        encoder->audioStream = avformat_new_stream(encoder->context, NULL);
        encoder->audio = avcodec_alloc_context3(acodec);
#else
        encoder->audioStream = avformat_new_stream(encoder->context, acodec);
        encoder->audio = encoder->audioStream->codec;
#endif
        encoder->audio->bit_rate = encoder->audioBitrate;
        encoder->audio->channels = 2;
        encoder->audio->channel_layout = AV_CH_LAYOUT_STEREO;
        encoder->audio->sample_rate = encoder->sampleRate;
        encoder->audio->sample_fmt = encoder->sampleFormat;
        /* "strict -2" permits experimental encoders (e.g. native AAC). */
        AVDictionary* opts = 0;
        av_dict_set(&opts, "strict", "-2", 0);
        if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) {
            encoder->audio->flags |= CODEC_FLAG_GLOBAL_HEADER;
        }
        /* NOTE(review): avcodec_open2 return value is not checked here —
         * confirm failure is handled downstream. */
        avcodec_open2(encoder->audio, acodec, &opts);
        av_dict_free(&opts);
#if LIBAVCODEC_VERSION_MAJOR >= 55
        encoder->audioFrame = av_frame_alloc();
#else
        encoder->audioFrame = avcodec_alloc_frame();
#endif
        /* Some encoders report frame_size 0 (take any number of samples);
         * force at least 1 so buffer sizing below stays non-zero. */
        if (!encoder->audio->frame_size) {
            encoder->audio->frame_size = 1;
        }
        encoder->audioFrame->nb_samples = encoder->audio->frame_size;
        encoder->audioFrame->format = encoder->audio->sample_fmt;
        encoder->audioFrame->pts = 0;

        /* Resampler: emulator audio (S16 stereo @ PREFERRED_SAMPLE_RATE) ->
         * whatever the encoder wants. */
        encoder->resampleContext = avresample_alloc_context();
        av_opt_set_int(encoder->resampleContext, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0);
        av_opt_set_int(encoder->resampleContext, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
        av_opt_set_int(encoder->resampleContext, "in_sample_rate", PREFERRED_SAMPLE_RATE, 0);
        av_opt_set_int(encoder->resampleContext, "out_sample_rate", encoder->sampleRate, 0);
        av_opt_set_int(encoder->resampleContext, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
        av_opt_set_int(encoder->resampleContext, "out_sample_fmt", encoder->sampleFormat, 0);
        avresample_open(encoder->resampleContext);

        /* Pre-resample buffer: nb_samples scaled to the input rate, 4 bytes
         * per stereo S16 sample pair. */
        encoder->audioBufferSize = (encoder->audioFrame->nb_samples * PREFERRED_SAMPLE_RATE / encoder->sampleRate) * 4;
        encoder->audioBuffer = av_malloc(encoder->audioBufferSize);
        /* Post-resample buffer, sized for the encoder's own sample format. */
        encoder->postaudioBufferSize = av_samples_get_buffer_size(0, encoder->audio->channels, encoder->audio->frame_size, encoder->audio->sample_fmt, 0);
        encoder->postaudioBuffer = av_malloc(encoder->postaudioBufferSize);
        avcodec_fill_audio_frame(encoder->audioFrame, encoder->audio->channels, encoder->audio->sample_fmt, (const uint8_t*) encoder->postaudioBuffer, encoder->postaudioBufferSize, 0);

        /* NOTE(review): strcasecmp() returns non-zero on MISMATCH, so this
         * OR-chain is true whenever the container differs from at least one
         * of mp4/m4v/mov — i.e. effectively always.  The comment below
         * suggests the intent was !strcasecmp(...) (apply the filter only
         * FOR those containers); confirm before changing. */
        if (encoder->audio->codec->id == AV_CODEC_ID_AAC &&
            (strcasecmp(encoder->containerFormat, "mp4") ||
             strcasecmp(encoder->containerFormat, "m4v") ||
             strcasecmp(encoder->containerFormat, "mov"))) {
            // MP4 container doesn't support the raw ADTS AAC format that the encoder spits out
#ifdef FFMPEG_USE_NEW_BSF
            av_bsf_alloc(av_bsf_get_by_name("aac_adtstoasc"), &encoder->absf);
            avcodec_parameters_from_context(encoder->absf->par_in, encoder->audio);
            av_bsf_init(encoder->absf);
#else
            encoder->absf = av_bitstream_filter_init("aac_adtstoasc");
#endif
        }
#ifdef FFMPEG_USE_CODECPAR
        avcodec_parameters_from_context(encoder->audioStream->codecpar, encoder->audio);
#endif
    }

#ifdef FFMPEG_USE_CODECPAR
    encoder->videoStream = avformat_new_stream(encoder->context, NULL);
    encoder->video = avcodec_alloc_context3(vcodec);
#else
    encoder->videoStream = avformat_new_stream(encoder->context, vcodec);
    encoder->video = encoder->videoStream->codec;
#endif
    encoder->video->bit_rate = encoder->videoBitrate;
    encoder->video->width = encoder->width;
    encoder->video->height = encoder->height;
    /* Time base is expressed in GBA CPU cycles per frame so video PTS can be
     * derived directly from emulated time. */
    encoder->video->time_base = (AVRational) { VIDEO_TOTAL_LENGTH, GBA_ARM7TDMI_FREQUENCY };
    encoder->video->pix_fmt = encoder->pixFormat;
    encoder->video->gop_size = 60;
    encoder->video->max_b_frames = 3;
    if (encoder->context->oformat->flags & AVFMT_GLOBALHEADER) {
        encoder->video->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    if (strcmp(vcodec->name, "libx264") == 0) {
        // Try to adaptively figure out when you can use a slower encoder
        if (encoder->width * encoder->height > 1000000) {
            av_opt_set(encoder->video->priv_data, "preset", "superfast", 0);
        } else if (encoder->width * encoder->height > 500000) {
            av_opt_set(encoder->video->priv_data, "preset", "veryfast", 0);
        } else {
            av_opt_set(encoder->video->priv_data, "preset", "faster", 0);
        }
        av_opt_set(encoder->video->priv_data, "tune", "zerolatency", 0);
    }

    avcodec_open2(encoder->video, vcodec, 0);
#if LIBAVCODEC_VERSION_MAJOR >= 55
    encoder->videoFrame = av_frame_alloc();
#else
    encoder->videoFrame = avcodec_alloc_frame();
#endif
    encoder->videoFrame->format = encoder->video->pix_fmt;
    encoder->videoFrame->width = encoder->video->width;
    encoder->videoFrame->height = encoder->video->height;
    encoder->videoFrame->pts = 0;
    _ffmpegSetVideoDimensions(&encoder->d, encoder->iwidth, encoder->iheight);
    av_image_alloc(encoder->videoFrame->data, encoder->videoFrame->linesize, encoder->video->width, encoder->video->height, encoder->video->pix_fmt, 32);
#ifdef FFMPEG_USE_CODECPAR
    avcodec_parameters_from_context(encoder->videoStream->codecpar, encoder->video);
#endif

    avio_open(&encoder->context->pb, outfile, AVIO_FLAG_WRITE);
    return avformat_write_header(encoder->context, 0) >= 0;
}
/*
 * Parses one directive of a <Feed>...</Feed> section of the ffserver
 * configuration file.
 *
 * cmd      - the directive keyword (e.g. "<Feed", "Launch", "File", ...)
 * p        - cursor into the rest of the config line; advanced by
 *            ffserver_get_arg() as arguments are consumed
 * line_num - config line number, used for ACL diagnostics
 * pfeed    - in/out: the feed currently being built; set on "<Feed", cleared
 *            on "</Feed>"
 *
 * Returns 0 on success or AVERROR(ENOMEM) on allocation failure; other
 * problems are reported through the ERROR/WARNING macros.
 *
 * Fixed relative to the original: the '>' terminator lookup checked *q
 * before checking q itself, dereferencing NULL when the feed name carried
 * no '>' — now it checks the pointer.
 */
static int ffserver_parse_config_feed(FFServerConfig *config, const char *cmd, const char **p,
                                      int line_num, FFServerStream **pfeed)
{
    FFServerStream *feed;
    char arg[1024];

    av_assert0(pfeed);
    feed = *pfeed;

    if (!av_strcasecmp(cmd, "<Feed")) {
        char *q;
        FFServerStream *s;

        feed = av_mallocz(sizeof(FFServerStream));
        if (!feed)
            return AVERROR(ENOMEM);
        ffserver_get_arg(feed->filename, sizeof(feed->filename), p);
        /* Strip the closing '>' of "<Feed name>", if present.  Check the
         * pointer, not its target: strrchr returns NULL when no '>' exists. */
        q = strrchr(feed->filename, '>');
        if (q)
            *q = '\0';

        /* NOTE(review): on a duplicate feed name, 'feed' is not freed before
         * ERROR() — confirm whether ERROR aborts parsing or this leaks. */
        for (s = config->first_feed; s; s = s->next) {
            if (!strcmp(feed->filename, s->filename))
                ERROR("Feed '%s' already registered\n", s->filename);
        }

        feed->fmt = av_guess_format("ffm", NULL, NULL);
        /* default feed file */
        snprintf(feed->feed_filename, sizeof(feed->feed_filename),
                 "/tmp/%s.ffm", feed->filename);
        feed->feed_max_size = 5 * 1024 * 1024;
        feed->is_feed = 1;
        feed->feed = feed; /* self feeding :-) */
        *pfeed = feed;
        return 0;
    }

    av_assert0(feed);

    if (!av_strcasecmp(cmd, "Launch")) {
        int i;

        /* Up to 62 arguments plus the auto-appended feed URL and the NULL
         * terminator fit in the 64-slot argv. */
        feed->child_argv = av_mallocz(64 * sizeof(char *));
        if (!feed->child_argv)
            return AVERROR(ENOMEM);
        for (i = 0; i < 62; i++) {
            ffserver_get_arg(arg, sizeof(arg), p);
            if (!arg[0])
                break;
            feed->child_argv[i] = av_strdup(arg);
            if (!feed->child_argv[i])
                return AVERROR(ENOMEM);
        }

        /* Append the URL the child should feed into. */
        feed->child_argv[i] =
            av_asprintf("http://%s:%d/%s",
                        (config->http_addr.sin_addr.s_addr == INADDR_ANY) ?
                            "127.0.0.1" : inet_ntoa(config->http_addr.sin_addr),
                        ntohs(config->http_addr.sin_port), feed->filename);
        if (!feed->child_argv[i])
            return AVERROR(ENOMEM);
    } else if (!av_strcasecmp(cmd, "ACL")) {
        ffserver_parse_acl_row(NULL, feed, NULL, *p, config->filename, line_num);
    } else if (!av_strcasecmp(cmd, "File") || !av_strcasecmp(cmd, "ReadOnlyFile")) {
        ffserver_get_arg(feed->feed_filename, sizeof(feed->feed_filename), p);
        feed->readonly = !av_strcasecmp(cmd, "ReadOnlyFile");
    } else if (!av_strcasecmp(cmd, "Truncate")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        /* assume Truncate is true in case no argument is specified */
        if (!arg[0]) {
            feed->truncate = 1;
        } else {
            WARNING("Truncate N syntax in configuration file is deprecated, "
                    "use Truncate alone with no arguments\n");
            feed->truncate = strtod(arg, NULL);
        }
    } else if (!av_strcasecmp(cmd, "FileMaxSize")) {
        char *p1;
        double fsize;

        ffserver_get_arg(arg, sizeof(arg), p);
        p1 = arg;
        fsize = strtod(p1, &p1);
        /* Apply the K/M/G suffix; anything else (including a missing
         * suffix) is rejected. */
        switch(av_toupper(*p1)) {
        case 'K': fsize *= 1024; break;
        case 'M': fsize *= 1024 * 1024; break;
        case 'G': fsize *= 1024 * 1024 * 1024; break;
        default:
            ERROR("Invalid file size: %s\n", arg);
            break;
        }
        feed->feed_max_size = (int64_t)fsize;
        if (feed->feed_max_size < FFM_PACKET_SIZE*4)
            ERROR("Feed max file size is too small, must be at least %d\n",
                  FFM_PACKET_SIZE*4);
    } else if (!av_strcasecmp(cmd, "</Feed>")) {
        *pfeed = NULL;
    } else {
        ERROR("Invalid entry '%s' inside <Feed></Feed>\n", cmd);
    }
    return 0;
}
/** * get_oformat * Obtains the output format used for the specified codec. For mpeg4 codecs, * the format is avi; for mpeg1 codec, the format is mpeg. The filename has * to be passed, because it gets the appropriate extension appended onto it. * * Returns * AVOutputFormat pointer or NULL if any error happens. */ static AVOutputFormat *get_oformat(const char *codec, char *filename) { const char *ext; AVOutputFormat *of = NULL; /* * Here, we use guess_format to automatically setup the codec information. * If we are using msmpeg4, manually set that codec here. * We also dynamically add the file extension to the filename here. This was * done to support both mpeg1 and mpeg4 codecs since they have different extensions. */ if ((strcmp(codec, TIMELAPSE_CODEC) == 0) #ifndef FFMPEG_NO_NONSTD_MPEG1 || (strcmp(codec, "mpeg1") == 0) #endif ) { ext = ".mpg"; /* * We use "mpeg1video" for raw mpeg1 format. Using "mpeg" would * result in a muxed output file, which isn't appropriate here. */ #ifdef GUESS_NO_DEPRECATED of = guess_format("mpeg1video", NULL, NULL); #else of = av_guess_format("mpeg1video", NULL, NULL); #endif /* But we want the trailer to be correctly written. */ if (of) of->write_trailer = mpeg1_write_trailer; #ifdef FFMPEG_NO_NONSTD_MPEG1 } else if (strcmp(codec, "mpeg1") == 0) { MOTION_LOG(WRN, TYPE_ENCODER, NO_ERRNO, "%s: *** mpeg1 support for normal" " videos has been disabled ***"); return NULL; #endif } else if (strcmp(codec, "mpeg4") == 0) { ext = ".avi"; #ifdef GUESS_NO_DEPRECATED of = guess_format("mpeg1video", NULL, NULL); #else of = av_guess_format("avi", NULL, NULL); #endif } else if (strcmp(codec, "msmpeg4") == 0) { ext = ".avi"; #ifdef GUESS_NO_DEPRECATED of = guess_format("mpeg1video", NULL, NULL); #else of = av_guess_format("avi", NULL, NULL); #endif /* Manually override the codec id. 
*/ if (of) of->video_codec = CODEC_ID_MSMPEG4V2; } else if (strcmp(codec, "swf") == 0) { ext = ".swf"; #ifdef GUESS_NO_DEPRECATED of = guess_format("mpeg1video", NULL, NULL); #else of = av_guess_format("swf", NULL, NULL); #endif } else if (strcmp(codec, "flv") == 0) { ext = ".flv"; #ifdef GUESS_NO_DEPRECATED of = guess_format("mpeg1video", NULL, NULL); #else of = av_guess_format("flv", NULL, NULL); #endif of->video_codec = CODEC_ID_FLV1; } else if (strcmp(codec, "ffv1") == 0) { ext = ".avi"; #ifdef GUESS_NO_DEPRECATED of = guess_format("mpeg1video", NULL, NULL); #else of = av_guess_format("avi", NULL, NULL); #endif /* * Use the FFMPEG Lossless Video codec (experimental!). * Requires strict_std_compliance to be <= -2 */ if (of) of->video_codec = CODEC_ID_FFV1; } else if (strcmp(codec, "mov") == 0) { ext = ".mov"; #ifdef GUESS_NO_DEPRECATED of = guess_format("mpeg1video", NULL, NULL); #else of = av_guess_format("mov", NULL, NULL); #endif } else if (strcmp (codec, "ogg") == 0) { ext = ".ogg"; #ifdef GUESS_NO_DEPRECATED of = guess_format ("ogg", NULL, NULL); #else of = av_guess_format ("ogg", NULL, NULL); #endif } else { MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: ffmpeg_video_codec option value" " %s is not supported", codec); return NULL; } if (!of) { MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Could not guess format for %s", codec); return NULL; } /* The 4 allows for ".avi" or ".mpg" to be appended. */ strncat(filename, ext, 4); return of; }
int main(int argc, char **argv) { const char *filename; AVOutputFormat *fmt; AVFormatContext *oc; AVStream *audio_st, *video_st; double audio_pts, video_pts; int i; /* Initialize libavcodec, and register all codecs and formats. */ av_register_all(); if (argc != 2) { printf("usage: %s output_file\n" "API example program to output a media file with libavformat.\n" "The output format is automatically guessed according to the file extension.\n" "Raw images can also be output by using '%%d' in the filename\n" "\n", argv[0]); return 1; } filename = argv[1]; /* Autodetect the output format from the name. default is MPEG. */ fmt = av_guess_format(NULL, filename, NULL); if (!fmt) { printf("Could not deduce output format from file extension: using MPEG.\n"); fmt = av_guess_format("mpeg", NULL, NULL); } if (!fmt) { fprintf(stderr, "Could not find suitable output format\n"); return 1; } /* Allocate the output media context. */ oc = avformat_alloc_context(); if (!oc) { fprintf(stderr, "Memory error\n"); return 1; } oc->oformat = fmt; snprintf(oc->filename, sizeof(oc->filename), "%s", filename); /* Add the audio and video streams using the default format codecs * and initialize the codecs. */ video_st = NULL; audio_st = NULL; if (fmt->video_codec != AV_CODEC_ID_NONE) { video_st = add_video_stream(oc, fmt->video_codec); } if (fmt->audio_codec != AV_CODEC_ID_NONE) { audio_st = add_audio_stream(oc, fmt->audio_codec); } /* Now that all the parameters are set, we can open the audio and * video codecs and allocate the necessary encode buffers. */ if (video_st) open_video(oc, video_st); if (audio_st) open_audio(oc, audio_st); av_dump_format(oc, 0, filename, 1); /* open the output file, if needed */ if (!(fmt->flags & AVFMT_NOFILE)) { if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) { fprintf(stderr, "Could not open '%s'\n", filename); return 1; } } /* Write the stream header, if any. */ avformat_write_header(oc, NULL); for (;;) { /* Compute current audio and video time. 
*/ if (audio_st) audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den; else audio_pts = 0.0; if (video_st) video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den; else video_pts = 0.0; if ((!audio_st || audio_pts >= STREAM_DURATION) && (!video_st || video_pts >= STREAM_DURATION)) break; /* write interleaved audio and video frames */ if (!video_st || (video_st && audio_st && audio_pts < video_pts)) { write_audio_frame(oc, audio_st); } else { write_video_frame(oc, video_st); } } /* Write the trailer, if any. The trailer must be written before you * close the CodecContexts open when you wrote the header; otherwise * av_write_trailer() may try to use memory that was freed on * av_codec_close(). */ av_write_trailer(oc); /* Close each codec. */ if (video_st) close_video(oc, video_st); if (audio_st) close_audio(oc, audio_st); /* Free the streams. */ for (i = 0; i < oc->nb_streams; i++) { av_freep(&oc->streams[i]->codec); av_freep(&oc->streams[i]); } if (!(fmt->flags & AVFMT_NOFILE)) /* Close the output file. */ avio_close(oc->pb); /* free the stream */ av_free(oc); return 0; }
// Reports whether FFmpeg knows an output (muxer) format matching the given
// short name and/or filename extension.
bool matchFormat( const std::string& format, const std::string& filename )
{
    return av_guess_format(format.c_str(), filename.c_str(), NULL) != NULL;
}
/*
 * Allocates and fully initializes an MJPEG/AVI writer used to build a proxy
 * (preview) copy of 'anim' at the given size.  'st' is the source video
 * stream whose aspect ratio / dimensions / pixel format drive the optional
 * scaling path.  Returns the context, or NULL/0 on failure.
 *
 * NOTE(review): 'quality' is unused in this visible body — confirm whether
 * it is dead or consumed by code outside this chunk.  On the error paths
 * only rv->of is freed; 'rv' itself (MEM_callocN) appears to leak — verify.
 */
static struct proxy_output_ctx * alloc_proxy_output_ffmpeg(
        struct anim * anim,
        AVStream * st, int proxy_size, int width, int height,
        int quality)
{
    struct proxy_output_ctx * rv = MEM_callocN(
        sizeof(struct proxy_output_ctx), "alloc_proxy_output");
    char fname[FILE_MAXDIR+FILE_MAXFILE];

    // JPEG requires this
    width = round_up(width, 8);
    height = round_up(height, 8);

    rv->proxy_size = proxy_size;
    rv->anim = anim;
    get_proxy_filename(rv->anim, rv->proxy_size, fname, TRUE);
    BLI_make_existing_file(fname);

    /* AVI container with a single MJPEG stream. */
    rv->of = avformat_alloc_context();
    rv->of->oformat = av_guess_format("avi", NULL, NULL);
    BLI_snprintf(rv->of->filename, sizeof(rv->of->filename), "%s", fname);

    fprintf(stderr, "Starting work on proxy: %s\n", rv->of->filename);

    rv->st = av_new_stream(rv->of, 0);
    rv->c = rv->st->codec;
    rv->c->codec_type = AVMEDIA_TYPE_VIDEO;
    rv->c->codec_id = CODEC_ID_MJPEG;
    rv->c->width = width;
    rv->c->height = height;

    rv->of->oformat->video_codec = rv->c->codec_id;
    rv->codec = avcodec_find_encoder(rv->c->codec_id);

    if (!rv->codec) {
        fprintf(stderr, "No ffmpeg MJPEG encoder available? "
                "Proxy not built!\n");
        av_free(rv->of);
        return NULL;
    }

    /* Prefer the encoder's first pixel format; fall back to YUVJ420P. */
    if (rv->codec->pix_fmts) {
        rv->c->pix_fmt = rv->codec->pix_fmts[0];
    } else {
        rv->c->pix_fmt = PIX_FMT_YUVJ420P;
    }

    /* Propagate the source aspect ratio to both codec and stream. */
    rv->c->sample_aspect_ratio =
        rv->st->sample_aspect_ratio =
        st->codec->sample_aspect_ratio;

    /* Nominal 25 fps time base; proxy frames are written 1:1. */
    rv->c->time_base.den = 25;
    rv->c->time_base.num = 1;
    rv->st->time_base = rv->c->time_base;

    if (rv->of->flags & AVFMT_GLOBALHEADER) {
        rv->c->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    if (av_set_parameters(rv->of, NULL) < 0) {
        fprintf(stderr, "Couldn't set output parameters? "
                "Proxy not built!\n");
        av_free(rv->of);
        return 0;
    }

    if (avio_open(&rv->of->pb, fname, AVIO_FLAG_WRITE) < 0) {
        fprintf(stderr, "Couldn't open outputfile! "
                "Proxy not built!\n");
        av_free(rv->of);
        return 0;
    }

    /* NOTE(review): avcodec_open return value unchecked — confirm callers
     * tolerate a half-open context. */
    avcodec_open(rv->c, rv->codec);

    rv->video_buffersize = 2000000;
    rv->video_buffer = (uint8_t*)MEM_mallocN(
        rv->video_buffersize, "FFMPEG video buffer");

    rv->orig_height = st->codec->height;

    /* If the source does not already match the proxy geometry/pixel format,
     * allocate a conversion frame (rows padded to 16) and a sws scaler. */
    if (st->codec->width != width || st->codec->height != height
        || st->codec->pix_fmt != rv->c->pix_fmt) {
        rv->frame = avcodec_alloc_frame();
        avpicture_fill((AVPicture*) rv->frame,
                       MEM_mallocN(avpicture_get_size(
                                       rv->c->pix_fmt,
                                       round_up(width, 16), height),
                                   "alloc proxy output frame"),
                       rv->c->pix_fmt, round_up(width, 16), height);

        rv->sws_ctx = sws_getContext(
            st->codec->width,
            st->codec->height,
            st->codec->pix_fmt,
            width, height,
            rv->c->pix_fmt,
            SWS_FAST_BILINEAR | SWS_PRINT_INFO,
            NULL, NULL, NULL);
    }

    av_write_header(rv->of);

    return rv;
}
/*
 * HTTP Live Streaming segmenter: reads an MPEG-TS input (file or pipe),
 * re-muxes it unchanged into fixed-duration .ts segment files named
 * "<prefix>-<N>.ts", and maintains an m3u8 playlist (written via a hidden
 * temporary file).  Segments are cut at video keyframes; when only audio is
 * present, any audio packet may start a segment.  Supports resuming an
 * existing stream (-r) and a sliding window of -n segments with deletion of
 * expired ones.
 */
int main(int argc, char **argv)
{
    double prev_segment_time = 0;
    unsigned int output_index = 1;
    AVInputFormat *ifmt;
    AVOutputFormat *ofmt;
    AVFormatContext *ic = NULL;
    AVFormatContext *oc;
    AVStream *video_st = NULL;
    AVStream *audio_st = NULL;
    AVCodec *codec;
    char *output_filename;
    char *remove_filename;
    int video_index = -1;
    int audio_index = -1;
    int last_chunk = 0;
    int resume = 0;
    unsigned int first_segment = 1;
    unsigned int last_segment = 0;
    int write_index = 1;
    int decode_done;
    char *dot;
    int ret;
    unsigned int i;
    int remove_file;
    struct sigaction act;
    int opt;
    int longindex;
    char *endptr;
    struct options_t options;

    static const char *optstring = "i:d:p:m:u:r::n:ovh?";
    static const struct option longopts[] = {
        { "input",         required_argument, NULL, 'i' },
        { "duration",      required_argument, NULL, 'd' },
        { "output-prefix", required_argument, NULL, 'p' },
        { "m3u8-file",     required_argument, NULL, 'm' },
        { "url-prefix",    required_argument, NULL, 'u' },
        { "resume",        optional_argument, NULL, 'r' },
        { "num-segments",  required_argument, NULL, 'n' },
        { "help",          no_argument,       NULL, 'h' },
        { 0, 0, 0, 0 }
    };

    memset(&options, 0 ,sizeof(options));

    /* Set some defaults */
    options.segment_duration = 10;
    options.num_segments = 0;

    /* Parse command-line options; loop until getopt_long returns -1. */
    do {
        opt = getopt_long(argc, argv, optstring, longopts, &longindex );
        switch (opt) {
            case 'i':
                options.input_file = optarg;
                /* "-" means read the transport stream from stdin. */
                if (!strcmp(options.input_file, "-")) {
                    options.input_file = "pipe:";
                }
                break;
            case 'd':
                options.segment_duration = strtol(optarg, &endptr, 10);
                if (optarg == endptr || options.segment_duration < 0
                    || options.segment_duration == -LONG_MAX) {
                    fprintf(stderr, "Segment duration time (%s) invalid\n", optarg);
                    exit(1);
                }
                break;
            case 'p':
                options.output_prefix = optarg;
                break;
            case 'm':
                options.m3u8_file = optarg;
                break;
            case 'u':
                options.url_prefix = optarg;
                break;
            case 'r':
                /* -r with a numeric arg resumes at that chunk; bare -r (-1)
                 * means auto-detect the last chunk on disk. */
                if (optarg && strtol(optarg, &endptr, 10)) {
                    resume = strtol(optarg, &endptr, 10);
                } else {
                    resume = -1;
                }
                break;
            case 'n':
                options.num_segments = strtol(optarg, &endptr, 10);
                if (optarg == endptr || options.num_segments < 0
                    || options.num_segments >= LONG_MAX) {
                    fprintf(stderr, "Maximum number of ts files (%s) invalid\n", optarg);
                    exit(1);
                }
                break;
            case 'h':
                display_usage();
                break;
        }
    } while (opt != -1);

    /* Check required args where set*/
    if (options.input_file == NULL) {
        fprintf(stderr, "Please specify an input file.\n");
        exit(1);
    }
    if (options.output_prefix == NULL) {
        fprintf(stderr, "Please specify an output prefix.\n");
        exit(1);
    }
    if (options.m3u8_file == NULL) {
        fprintf(stderr, "Please specify an m3u8 output file.\n");
        exit(1);
    }
    if (options.url_prefix == NULL) {
        fprintf(stderr, "Please specify a url prefix.\n");
        exit(1);
    }

    av_register_all();

    /* Segment filenames are "<prefix>-<N>.ts"; 15 extra bytes cover the
     * separator, index digits, extension and NUL. */
    remove_filename = malloc(sizeof(char) * (strlen(options.output_prefix) + 15));
    if (!remove_filename) {
        fprintf(stderr, "Could not allocate space for remove filenames\n");
        exit(1);
    }
    output_filename = malloc(sizeof(char) * (strlen(options.output_prefix) + 15));
    if (!output_filename) {
        fprintf(stderr, "Could not allocate space for output filenames\n");
        exit(1);
    }
    options.tmp_m3u8_file = malloc(strlen(options.m3u8_file) + 2);
    if (!options.tmp_m3u8_file) {
        fprintf(stderr, "Could not allocate space for temporary index filename\n");
        exit(1);
    }

    //check if we want to continue an existing stream
    if (resume != 0) {
        if (resume == -1) {
            last_chunk = get_last_chunk(options.output_prefix);
        } else {
            last_chunk = resume;
        }
        last_segment = last_chunk - 1;
        output_index = last_chunk;
    }

    // Use a dotfile as a temporary file: insert '.' before the basename of
    // the m3u8 path so the playlist can be swapped into place atomically.
    strncpy(options.tmp_m3u8_file, options.m3u8_file, strlen(options.m3u8_file) + 2);
    dot = strrchr(options.tmp_m3u8_file, '/');
    dot = dot ? dot + 1 : options.tmp_m3u8_file;
    memmove(dot + 1, dot, strlen(dot));
    *dot = '.';

    ifmt = av_find_input_format("mpegts");
    if (!ifmt) {
        fprintf(stderr, "Could not find MPEG-TS demuxer\n");
        exit(1);
    }

    ret = avformat_open_input(&ic, options.input_file, ifmt, NULL);
    if (ret != 0) {
        fprintf(stderr, "Could not open input file, make sure it is an mpegts file: %d\n", ret);
        exit(1);
    }

    if (avformat_find_stream_info(ic, NULL) < 0) {
        fprintf(stderr, "Could not read stream information\n");
        exit(1);
    }

    ofmt = av_guess_format("mpegts", NULL, NULL);
    if (!ofmt) {
        fprintf(stderr, "Could not find MPEG-TS muxer\n");
        exit(1);
    }

    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Could not allocated output context");
        exit(1);
    }
    oc->oformat = ofmt;

    /* Pick the first video and first audio stream; copy them to the output
     * and discard everything else. */
    for (i = 0; i < ic->nb_streams && (video_index < 0 || audio_index < 0); i++) {
        switch (ic->streams[i]->codec->codec_type) {
            case AVMEDIA_TYPE_VIDEO:
                video_index = i;
                ic->streams[i]->discard = AVDISCARD_NONE;
                video_st = add_output_stream(oc, ic->streams[i]);
                break;
            case AVMEDIA_TYPE_AUDIO:
                audio_index = i;
                ic->streams[i]->discard = AVDISCARD_NONE;
                audio_st = add_output_stream(oc, ic->streams[i]);
                break;
            default:
                ic->streams[i]->discard = AVDISCARD_ALL;
                break;
        }
    }

    // Don't print warnings when PTS and DTS are identical.
    ic->flags |= AVFMT_FLAG_IGNDTS;

    av_dump_format(oc, 0, options.output_prefix, 1);

    /* Open a video decoder only so keyframe flags are reliable; failures are
     * non-fatal (segment cuts just degrade). */
    if (video_st) {
        codec = avcodec_find_decoder(video_st->codec->codec_id);
        if (!codec) {
            fprintf(stderr, "Could not find video decoder %x, key frames will not be honored\n",
                    video_st->codec->codec_id);
        }
        if (avcodec_open2(video_st->codec, codec, NULL) < 0) {
            fprintf(stderr, "Could not open video decoder, key frames will not be honored\n");
        }
    }

    /* Open the first segment and write the container header. */
    snprintf(output_filename, strlen(options.output_prefix) + 15, "%s-%u.ts",
             options.output_prefix, output_index++);
    if (avio_open(&oc->pb, output_filename, AVIO_FLAG_WRITE) < 0) {
        fprintf(stderr, "Could not open '%s'\n", output_filename);
        exit(1);
    }

    if (avformat_write_header(oc, NULL)) {
        fprintf(stderr, "Could not write mpegts header to first output file\n");
        exit(1);
    }

    write_index = !write_index_file(options, first_segment, last_segment, 0);

    /* Setup signals so SIGINT/SIGTERM finish the current segment cleanly. */
    memset(&act, 0, sizeof(act));
    act.sa_handler = &handler;
    sigaction(SIGINT, &act, NULL);
    sigaction(SIGTERM, &act, NULL);

    /* Main remux loop: copy packets, rolling to a new segment file whenever
     * segment_duration has elapsed at a cut point. */
    do {
        double segment_time = prev_segment_time;
        AVPacket packet;

        if (terminate) {
            break;
        }

        decode_done = av_read_frame(ic, &packet);
        if (decode_done < 0) {
            break;
        }

        if (av_dup_packet(&packet) < 0) {
            fprintf(stderr, "Could not duplicate packet");
            av_free_packet(&packet);
            break;
        }

        // Use the video stream as time base and split at keyframes;
        // otherwise (audio-only input) use the audio stream.
        if (packet.stream_index == video_index && (packet.flags & AV_PKT_FLAG_KEY)) {
            segment_time = packet.pts * av_q2d(video_st->time_base);
        } else if (video_index < 0) {
            segment_time = packet.pts * av_q2d(audio_st->time_base);
        } else {
            segment_time = prev_segment_time;
        }

        if (segment_time - prev_segment_time >= options.segment_duration) {
            av_write_trailer(oc);   // close ts file and free memory
            avio_flush(oc->pb);
            avio_close(oc->pb);

            /* Sliding window: drop the oldest segment once num_segments is
             * exceeded. */
            if (options.num_segments && (int)(last_segment - first_segment) >= options.num_segments - 1) {
                remove_file = 1;
                first_segment++;
            } else {
                remove_file = 0;
            }

            if (write_index) {
                write_index = !write_index_file(options, first_segment, ++last_segment, 0);
            }

            if (remove_file) {
                snprintf(remove_filename, strlen(options.output_prefix) + 15, "%s-%u.ts",
                         options.output_prefix, first_segment - 1);
                remove(remove_filename);
            }

            snprintf(output_filename, strlen(options.output_prefix) + 15, "%s-%u.ts",
                     options.output_prefix, output_index++);
            if (avio_open(&oc->pb, output_filename, AVIO_FLAG_WRITE) < 0) {
                fprintf(stderr, "Could not open '%s'\n", output_filename);
                break;
            }

            // Write a new header at the start of each file
            if (avformat_write_header(oc, NULL)) {
                fprintf(stderr, "Could not write mpegts header to first output file\n");
                exit(1);
            }

            prev_segment_time = segment_time;
        }

        ret = av_interleaved_write_frame(oc, &packet);
        if (ret < 0) {
            fprintf(stderr, "Warning: Could not write frame of stream\n");
        } else if (ret > 0) {
            fprintf(stderr, "End of stream requested\n");
            av_free_packet(&packet);
            break;
        }

        av_free_packet(&packet);
    } while (!decode_done);

    /* Finalize the last segment and release everything. */
    av_write_trailer(oc);

    if (video_st) {
        avcodec_close(video_st->codec);
    }

    for(i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    avio_close(oc->pb);
    av_free(oc);

    /* One final playlist update marking the stream as complete. */
    if (options.num_segments && (int)(last_segment - first_segment) >= options.num_segments - 1) {
        remove_file = 1;
        first_segment++;
    } else {
        remove_file = 0;
    }

    if (write_index) {
        write_index_file(options, first_segment, ++last_segment, 1);
    }

    if (remove_file) {
        snprintf(remove_filename, strlen(options.output_prefix) + 15, "%s-%u.ts",
                 options.output_prefix, first_segment - 1);
        remove(remove_filename);
    }

    return 0;
}
void recorder::init_context(unsigned width, unsigned height) { assert(!avcontext_); if (output_.bad()) { goto init_failed; } outputfmt_ = av_guess_format ("avi", NULL, NULL); assert(outputfmt_); fmtcontext_ = avformat_alloc_context(); fmtcontext_->oformat = outputfmt_; std::copy(path_.begin(), path_.end(), fmtcontext_->filename); assert(outputfmt_->video_codec != CODEC_ID_NONE); video_st_ = avformat_new_stream(fmtcontext_, 0); assert(video_st_); avcontext_ = video_st_->codec; avcodec_ = avcodec_find_encoder(VIDEO_CODEC); avcodec_get_context_defaults3(avcontext_, avcodec_); yuvframe_ = avcodec_alloc_frame(); rgbframe_ = avcodec_alloc_frame(); avcontext_->codec_id = VIDEO_CODEC; avcontext_->codec_type = AVMEDIA_TYPE_VIDEO; avcontext_->width = width; avcontext_->height = height; avcontext_->flags = CODEC_FLAG_4MV | CODEC_FLAG_AC_PRED | CODEC_FLAG_PASS1; avcontext_->mb_decision = FF_MB_DECISION_RD; avcontext_->me_cmp = 2; avcontext_->me_sub_cmp = 2; avcontext_->trellis = 2; avcontext_->bit_rate = 2000000*1000; avcontext_->bit_rate_tolerance = avcontext_->bit_rate; avcontext_->b_frame_strategy = 1; avcontext_->coder_type = 1; avcontext_->me_method = ME_EPZS; avcontext_->me_subpel_quality = 5; avcontext_->i_quant_factor = 0.71; avcontext_->qcompress = 0.6; avcontext_->max_qdiff = 4; //DEPRECATED avcontext_->directpred = 1; avcontext_->gop_size = 300; avcontext_->max_b_frames=3; avcontext_->time_base.den = frame_per_seconds_; avcontext_->time_base.num = 1; avcontext_->pix_fmt = FRAME_FORMAT; av_dump_format(fmtcontext_, 0, path_.c_str(), 1); // find the mpeg1 video encoder if (!avcodec_) { std::cerr << "Codec not found" << std::endl; goto init_failed; } int err; avopts_ = 0; // open the codec. 
if ((err = avcodec_open2(avcontext_, avcodec_, &avopts_)) < 0) { char err_message[1000]; memset(err_message, 0, 1000); int err_err = av_strerror(-err, err_message, 1000); std::cerr << "avcodec_open2 Could not open codec: error " << err_err << ": " << err_message << std::endl; goto init_failed; } // alloc image and output buffer video_buffer_size_ = 3000000; video_buffer_ = new uint8_t[video_buffer_size_]; // Initialization of ffmpeg frames. { int rgb_size = avpicture_get_size(PIX_FMT_RGB24, avcontext_->width, avcontext_->height); window_capture_size_ = rgb_size * sizeof(uint8_t); // size for RGB window_capture_ = new unsigned char[window_capture_size_]; window_capture_width_ = avcontext_->width; window_capture_height_ = avcontext_->height; avpicture_fill((AVPicture *)rgbframe_, window_capture_, PIX_FMT_RGB24, avcontext_->width, avcontext_->height); int yuv_size = avpicture_get_size(PIX_FMT_YUV420P, avcontext_->width, avcontext_->height); avpicture_fill((AVPicture *)yuvframe_, new uint8_t[yuv_size * sizeof(uint8_t)], PIX_FMT_YUV420P, avcontext_->width, avcontext_->height); } // Swscale context. 
swcontext_ = sws_getCachedContext(0, width, height, PIX_FMT_RGB24, width, height, FRAME_FORMAT, SWS_POINT, 0, 0, 0); if(!swcontext_) { std::cerr<< "Cannot initialize the swscale conversion context" << std::endl; goto init_failed; } /* open the output file, if needed */ if (!(outputfmt_->flags & AVFMT_NOFILE)) { #ifdef URL_WRONLY if (avio_open (&fmtcontext_->pb, path_.c_str(), URL_WRONLY) < 0) #else if (avio_open (&fmtcontext_->pb, path_.c_str(), AVIO_FLAG_WRITE) < 0) #endif { std::cerr << "Could not open "<< path_ << std::endl; assert(0 && "Could not open ouput file"); } } /* write the stream header, if any */ avformat_write_header(fmtcontext_, NULL); return; init_failed: init_failed_ = true; if (avcontext_) av_free(avcontext_); if (swcontext_) sws_freeContext(swcontext_); if (yuvframe_) av_free(yuvframe_); if (rgbframe_) av_free(rgbframe_); if (video_buffer_) delete [] video_buffer_; if (window_capture_) delete [] window_capture_; output_.close(); std::cerr << "Video recording disabled." << std::endl; return; }
void VideoStream::SetupFormat( ) { /* allocate the output media context */ ofc = NULL; #if ((LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(53, 5, 0)) && (LIBAVFORMAT_VERSION_MICRO >= 100)) avformat_alloc_output_context2( &ofc, NULL, format, filename ); #else AVFormatContext *s= avformat_alloc_context(); if(!s) { Fatal( "avformat_alloc_context failed %d \"%s\"", (size_t)ofc, av_err2str((size_t)ofc) ); } AVOutputFormat *oformat; if (format) { #if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(52, 45, 0) oformat = av_guess_format(format, NULL, NULL); #else oformat = guess_format(format, NULL, NULL); #endif if (!oformat) { Fatal( "Requested output format '%s' is not a suitable output format", format ); } } else { #if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(52, 45, 0) oformat = av_guess_format(NULL, filename, NULL); #else oformat = guess_format(NULL, filename, NULL); #endif if (!oformat) { Fatal( "Unable to find a suitable output format for '%s'", format ); } } s->oformat = oformat; if (s->oformat->priv_data_size > 0) { s->priv_data = av_mallocz(s->oformat->priv_data_size); if (!s->priv_data) { Fatal( "Could not allocate private data for output format." ); } #if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(52, 92, 0) if (s->oformat->priv_class) { *(const AVClass**)s->priv_data = s->oformat->priv_class; av_opt_set_defaults(s->priv_data); } #endif } else { s->priv_data = NULL; } if(filename) { snprintf( s->filename, sizeof(s->filename), filename ); } ofc = s; #endif if ( !ofc ) { Fatal( "avformat_alloc_..._context failed: %d", ofc ); } of = ofc->oformat; Debug( 1, "Using output format: %s (%s)", of->name, of->long_name ); }
// Entry point for the DeckLink capture tool: parse options, pick a capture
// card/connection/display mode, set up an FFmpeg muxer with raw video and
// 16-bit PCM audio streams, then block until a signal wakes the main thread.
int main(int argc, char *argv[])
{
    IDeckLinkIterator *deckLinkIterator = CreateDeckLinkIteratorInstance();
    DeckLinkCaptureDelegate *delegate;
    BMDDisplayMode selectedDisplayMode = bmdModeNTSC;
    int displayModeCount = 0;
    int exitStatus = 1;                  // pessimistic default; cleared on success
    int connection = 0, camera = 0, i=0;
    int ch;
    HRESULT result;

    pthread_mutex_init(&sleepMutex, NULL);
    pthread_cond_init(&sleepCond, NULL);

    av_register_all();

    if (!deckLinkIterator)
    {
        fprintf(stderr, "This application requires the DeckLink drivers installed.\n");
        goto bail;
    }

    // Parse command line options
    while ((ch = getopt(argc, argv, "?hc:s:f:a:m:n:F:C:I:")) != -1)
    {
        switch (ch)
        {
            case 'm':   // display-mode index (validated against the iterator below)
                g_videoModeIndex = atoi(optarg);
                break;
            case 'c':
                g_audioChannels = atoi(optarg);
                if (g_audioChannels != 2 && g_audioChannels != 8 && g_audioChannels != 16)
                {
                    fprintf(stderr, "Invalid argument: Audio Channels must be either 2, 8 or 16\n");
                    goto bail;
                }
                break;
            case 's':
                g_audioSampleDepth = atoi(optarg);
                if (g_audioSampleDepth != 16 && g_audioSampleDepth != 32)
                {
                    fprintf(stderr, "Invalid argument: Audio Sample Depth must be either 16 bits or 32 bits\n");
                    goto bail;
                }
                break;
            case 'f':   // output file name
                g_videoOutputFile = optarg;
                break;
            case 'n':   // maximum number of frames to capture
                g_maxFrames = atoi(optarg);
                break;
            case 'F':   // explicit container format; otherwise guessed from the filename below
                fmt = av_guess_format(optarg, NULL, NULL);
                break;
            case 'I':   // input connection selector (1..4, mapped below)
                connection = atoi(optarg);
                break;
            case 'C':   // card index
                camera = atoi(optarg);
                break;
            case '?':
            case 'h':
                usage(0);
        }
    }

    /* Connect to the first DeckLink instance */
    // Advance the iterator camera+1 times so -C selects the Nth card;
    // only the result of the final Next() call is checked.
    do
    {
        result = deckLinkIterator->Next(&deckLink);
    } while(i++<camera);

    if (result != S_OK)
    {
        fprintf(stderr, "No DeckLink PCI cards found.\n");
        goto bail;
    }

    if (deckLink->QueryInterface(IID_IDeckLinkInput, (void**)&deckLinkInput) != S_OK)
        goto bail;

    result = deckLink->QueryInterface(IID_IDeckLinkConfiguration, (void**)&deckLinkConfiguration);
    if (result != S_OK)
    {
        fprintf(stderr, "Could not obtain the IDeckLinkConfiguration interface - result = %08x\n", result);
        goto bail;
    }

    //XXX make it generic
    // Map the -I selector onto video/audio input connectors.
    if (connection == 1)
    {
        // composite video + analog audio
        deckLinkConfiguration->SetInt(bmdDeckLinkConfigVideoInputConnection, bmdVideoConnectionComposite);
        deckLinkConfiguration->SetInt(bmdDeckLinkConfigAudioInputConnection, bmdAudioConnectionAnalog);
    }
    else if (connection == 2)
    {
        // component video + analog audio
        deckLinkConfiguration->SetInt(bmdDeckLinkConfigVideoInputConnection, bmdVideoConnectionComponent);
        deckLinkConfiguration->SetInt(bmdDeckLinkConfigAudioInputConnection, bmdAudioConnectionAnalog);
    }
    else if (connection == 3)
    {
        // HDMI video + embedded audio
        deckLinkConfiguration->SetInt(bmdDeckLinkConfigVideoInputConnection, bmdVideoConnectionHDMI);
        deckLinkConfiguration->SetInt(bmdDeckLinkConfigAudioInputConnection, bmdAudioConnectionEmbedded);
    }
    else if (connection == 4)
    {
        // SDI video + embedded audio
        deckLinkConfiguration->SetInt(bmdDeckLinkConfigVideoInputConnection, bmdVideoConnectionSDI);
        deckLinkConfiguration->SetInt(bmdDeckLinkConfigAudioInputConnection, bmdAudioConnectionEmbedded);
    }

    delegate = new DeckLinkCaptureDelegate();
    deckLinkInput->SetCallback(delegate);

    // Obtain an IDeckLinkDisplayModeIterator to enumerate the display modes supported on output
    result = deckLinkInput->GetDisplayModeIterator(&displayModeIterator);
    if (result != S_OK)
    {
        fprintf(stderr, "Could not obtain the video output display mode iterator - result = %08x\n", result);
        goto bail;
    }

    // No -F given: guess the muxer from the output file name.
    if (!fmt)
        fmt = av_guess_format(NULL, g_videoOutputFile, NULL);

    if (g_videoModeIndex < 0)
    {
        fprintf(stderr, "No video mode specified\n");
        usage(0);
    }

    // NOTE(review): BMDDisplayMode is typically an unsigned type, so the
    // "selectedDisplayMode < 0" check further down may never fire -- confirm.
    selectedDisplayMode = -1;
    while (displayModeIterator->Next(&displayMode) == S_OK)
    {
        if (g_videoModeIndex == displayModeCount)
        {
            selectedDisplayMode = displayMode->GetDisplayMode();
            break;
        }
        displayModeCount++;
        displayMode->Release();
    }

    // Set up the FFmpeg muxer: raw video + 16-bit little-endian PCM audio.
    oc = avformat_alloc_context();
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", g_videoOutputFile);

    fmt->video_codec = CODEC_ID_RAWVIDEO;
    fmt->audio_codec = CODEC_ID_PCM_S16LE;

    video_st = add_video_stream(oc, fmt->video_codec);
    audio_st = add_audio_stream(oc, fmt->audio_codec);

    av_set_parameters(oc, NULL);
    if (!(fmt->flags & AVFMT_NOFILE))
    {
        if (url_fopen(&oc->pb, oc->filename, URL_WRONLY) < 0)
        {
            fprintf(stderr, "Could not open '%s'\n", oc->filename);
            exit(1);
        }
    }

    if (selectedDisplayMode < 0)
    {
        fprintf(stderr, "Invalid mode %d specified\n", g_videoModeIndex);
        goto bail;
    }

    result = deckLinkInput->EnableVideoInput(selectedDisplayMode, bmdFormat8BitYUV, 0);
    if(result != S_OK)
    {
        fprintf(stderr, "Failed to enable video input. Is another application using the card?\n");
        goto bail;
    }

    result = deckLinkInput->EnableAudioInput(bmdAudioSampleRate48kHz, g_audioSampleDepth, g_audioChannels);
    if(result != S_OK)
    {
        goto bail;
    }

    av_write_header(oc);

    result = deckLinkInput->StartStreams();
    if(result != S_OK)
    {
        goto bail;
    }

    // All Okay.
    exitStatus = 0;

    // Block main thread until signal occurs
    // (presumably a signal handler broadcasts sleepCond -- not visible here)
    pthread_mutex_lock(&sleepMutex);
    pthread_cond_wait(&sleepCond, &sleepMutex);
    pthread_mutex_unlock(&sleepMutex);
    fprintf(stderr, "Stopping Capture\n");

bail:
    // Shared cleanup path; also reached on errors before capture started.
    if (displayModeIterator != NULL)
    {
        displayModeIterator->Release();
        displayModeIterator = NULL;
    }

    if (deckLinkInput != NULL)
    {
        deckLinkInput->Release();
        deckLinkInput = NULL;
    }

    if (deckLink != NULL)
    {
        deckLink->Release();
        deckLink = NULL;
    }

    if (deckLinkIterator != NULL)
        deckLinkIterator->Release();

    if (oc != NULL)
    {
        // NOTE(review): this runs even on error paths where av_write_header()
        // was never called -- confirm the muxer tolerates that.
        av_write_trailer(oc);
        if (!(fmt->flags & AVFMT_NOFILE))
        {
            /* close the output file */
            url_fclose(oc->pb);
        }
    }

    return exitStatus;
}
int groove_file_save_as(struct GrooveFile *file, const char *filename) { struct GrooveFilePrivate *f = (struct GrooveFilePrivate *) file; // detect output format AVOutputFormat *ofmt = av_guess_format(f->ic->iformat->name, f->ic->filename, NULL); if (!ofmt) { return GrooveErrorUnknownFormat; } // allocate output media context f->oc = avformat_alloc_context(); if (!f->oc) { cleanup_save(file); return GrooveErrorNoMem; } f->oc->oformat = ofmt; snprintf(f->oc->filename, sizeof(f->oc->filename), "%s", filename); // open output file if needed if (!(ofmt->flags & AVFMT_NOFILE)) { if (avio_open(&f->oc->pb, f->oc->filename, AVIO_FLAG_WRITE) < 0) { cleanup_save(file); return GrooveErrorFileSystem; } f->tempfile_exists = 1; } if (f->ic->nb_streams > INT_MAX) { cleanup_save(file); return GrooveErrorTooManyStreams; } int stream_count = (int)f->ic->nb_streams; // add all the streams for (int i = 0; i < stream_count; i++) { AVStream *in_stream = f->ic->streams[i]; AVStream *out_stream = avformat_new_stream(f->oc, NULL); if (!out_stream) { cleanup_save(file); return GrooveErrorNoMem; } out_stream->id = in_stream->id; out_stream->disposition = in_stream->disposition; out_stream->time_base = in_stream->time_base; AVCodecContext *icodec = in_stream->codec; AVCodecContext *ocodec = out_stream->codec; ocodec->bits_per_raw_sample = icodec->bits_per_raw_sample; ocodec->chroma_sample_location = icodec->chroma_sample_location; ocodec->codec_id = icodec->codec_id; ocodec->codec_type = icodec->codec_type; if (!ocodec->codec_tag) { if (!f->oc->oformat->codec_tag || av_codec_get_id (f->oc->oformat->codec_tag, icodec->codec_tag) == ocodec->codec_id || av_codec_get_tag(f->oc->oformat->codec_tag, icodec->codec_id) <= 0) ocodec->codec_tag = icodec->codec_tag; } ocodec->bit_rate = icodec->bit_rate; ocodec->rc_max_rate = icodec->rc_max_rate; ocodec->rc_buffer_size = icodec->rc_buffer_size; ocodec->field_order = icodec->field_order; uint64_t extra_size = (uint64_t)icodec->extradata_size + 
FF_INPUT_BUFFER_PADDING_SIZE; if (extra_size > INT_MAX) { cleanup_save(file); return GrooveErrorEncoding; } ocodec->extradata = allocate<uint8_t>(extra_size); if (!ocodec->extradata) { cleanup_save(file); return GrooveErrorNoMem; } memcpy(ocodec->extradata, icodec->extradata, icodec->extradata_size); ocodec->extradata_size = icodec->extradata_size; switch (ocodec->codec_type) { case AVMEDIA_TYPE_AUDIO: ocodec->channel_layout = icodec->channel_layout; ocodec->sample_rate = icodec->sample_rate; ocodec->channels = icodec->channels; ocodec->frame_size = icodec->frame_size; ocodec->audio_service_type = icodec->audio_service_type; ocodec->block_align = icodec->block_align; break; case AVMEDIA_TYPE_VIDEO: ocodec->pix_fmt = icodec->pix_fmt; ocodec->width = icodec->width; ocodec->height = icodec->height; ocodec->has_b_frames = icodec->has_b_frames; if (!ocodec->sample_aspect_ratio.num) { ocodec->sample_aspect_ratio = out_stream->sample_aspect_ratio = in_stream->sample_aspect_ratio.num ? in_stream->sample_aspect_ratio : icodec->sample_aspect_ratio.num ? icodec->sample_aspect_ratio : AVRational{0, 1}; } break; case AVMEDIA_TYPE_SUBTITLE: ocodec->width = icodec->width; ocodec->height = icodec->height; break; case AVMEDIA_TYPE_DATA: case AVMEDIA_TYPE_ATTACHMENT: break; default: cleanup_save(file); return GrooveErrorEncoding; } } // set metadata av_dict_copy(&f->oc->metadata, f->ic->metadata, 0); if (avformat_write_header(f->oc, NULL) < 0) { cleanup_save(file); return GrooveErrorEncoding; } AVPacket *pkt = &f->audio_pkt; for (;;) { int err = av_read_frame(f->ic, pkt); if (err == AVERROR_EOF) { break; } else if (err < 0) { cleanup_save(file); return GrooveErrorDecoding; } if (av_write_frame(f->oc, pkt) < 0) { cleanup_save(file); return GrooveErrorEncoding; } av_free_packet(pkt); } if (av_write_trailer(f->oc) < 0) { cleanup_save(file); return GrooveErrorEncoding; } f->tempfile_exists = 0; cleanup_save(file); return 0; }
/** * get_oformat * Obtains the output format used for the specified codec. For mpeg4 codecs, * the format is avi; for mpeg1 codec, the format is mpeg. The filename has * to be passed, because it gets the appropriate extension appended onto it. * * Returns * AVOutputFormat pointer or NULL if any error happens. */ static AVOutputFormat *get_oformat(const char *codec, char *filename) { const char *ext; AVOutputFormat *of = NULL; /* * Here, we use guess_format to automatically setup the codec information. * If we are using msmpeg4, manually set that codec here. * We also dynamically add the file extension to the filename here. This was * done to support both mpeg1 and mpeg4 codecs since they have different extensions. */ if (strcmp(codec, "tlapse") == 0) { ext = ".swf"; of = av_guess_format("swf", NULL, NULL); if (of) of->video_codec = MY_CODEC_ID_MPEG2VIDEO; } else if (strcmp(codec, "mpeg4") == 0) { ext = ".avi"; of = av_guess_format("avi", NULL, NULL); } else if (strcmp(codec, "msmpeg4") == 0) { ext = ".avi"; of = av_guess_format("avi", NULL, NULL); /* Manually override the codec id. 
*/ if (of) of->video_codec = MY_CODEC_ID_MSMPEG4V2; } else if (strcmp(codec, "swf") == 0) { ext = ".swf"; of = av_guess_format("swf", NULL, NULL); } else if (strcmp(codec, "flv") == 0) { ext = ".flv"; of = av_guess_format("flv", NULL, NULL); of->video_codec = MY_CODEC_ID_FLV1; } else if (strcmp(codec, "ffv1") == 0) { ext = ".avi"; of = av_guess_format("avi", NULL, NULL); if (of) of->video_codec = MY_CODEC_ID_FFV1; } else if (strcmp(codec, "mov") == 0) { ext = ".mov"; of = av_guess_format("mov", NULL, NULL); } else if (strcmp (codec, "ogg") == 0) { ext = ".ogg"; of = av_guess_format ("ogg", NULL, NULL); } else if (strcmp (codec, "mp4") == 0){ ext = ".mp4"; of = av_guess_format ("mp4", NULL, NULL); of->video_codec = MY_CODEC_ID_H264; } else if (strcmp (codec, "mkv") == 0){ ext = ".mkv"; of = av_guess_format ("matroska", NULL, NULL); of->video_codec = MY_CODEC_ID_H264; } else if (strcmp (codec, "hevc") == 0){ ext = ".mp4"; of = av_guess_format ("mp4", NULL, NULL); of->video_codec = MY_CODEC_ID_HEVC; } else { MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: ffmpeg_video_codec option value" " %s is not supported", codec); return NULL; } if (!of) { MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Could not guess format for %s", codec); return NULL; } /* The 4 allows for ".avi" or ".mpg" to be appended. */ strncat(filename, ext, 4); return of; }
int main(int argc, char **argv) { if(argc != 5) { fprintf(stderr, "Usage: %s <segment length> <output location> <filename prefix> <encoding profile>\n", argv[0]); return 1; } struct config_info config; memset(&config, 0, sizeof(struct config_info)); config.segment_length = atoi(argv[1]); config.temp_directory = argv[2]; config.filename_prefix = argv[3]; config.encoding_profile = argv[4]; config.input_filename = "pipe://1"; char *output_filename = malloc(sizeof(char) * (strlen(config.temp_directory) + 1 + strlen(config.filename_prefix) + 10)); if (!output_filename) { fprintf(stderr, "Segmenter error: Could not allocate space for output filenames\n"); exit(1); } // ------------------ Done parsing input -------------- av_register_all(); AVInputFormat *input_format = av_find_input_format("mpegts"); if (!input_format) { fprintf(stderr, "Segmenter error: Could not find MPEG-TS demuxer\n"); exit(1); } AVFormatContext *input_context = NULL; int ret = av_open_input_file(&input_context, config.input_filename, input_format, 0, NULL); if (ret != 0) { fprintf(stderr, "Segmenter error: Could not open input file, make sure it is an mpegts file: %d\n", ret); exit(1); } if (av_find_stream_info(input_context) < 0) { fprintf(stderr, "Segmenter error: Could not read stream information\n"); exit(1); } #if LIBAVFORMAT_VERSION_MAJOR >= 52 && LIBAVFORMAT_VERSION_MINOR >= 45 AVOutputFormat *output_format = av_guess_format("mpegts", NULL, NULL); #else AVOutputFormat *output_format = guess_format("mpegts", NULL, NULL); #endif if (!output_format) { fprintf(stderr, "Segmenter error: Could not find MPEG-TS muxer\n"); exit(1); } AVFormatContext *output_context = avformat_alloc_context(); if (!output_context) { fprintf(stderr, "Segmenter error: Could not allocated output context"); exit(1); } output_context->oformat = output_format; int video_index = -1; int audio_index = -1; AVStream *video_stream; AVStream *audio_stream; int i; for (i = 0; i < input_context->nb_streams && (video_index < 0 || 
audio_index < 0); i++) { switch (input_context->streams[i]->codec->codec_type) { case CODEC_TYPE_VIDEO: video_index = i; input_context->streams[i]->discard = AVDISCARD_NONE; video_stream = add_output_stream(output_context, input_context->streams[i]); break; case CODEC_TYPE_AUDIO: audio_index = i; input_context->streams[i]->discard = AVDISCARD_NONE; audio_stream = add_output_stream(output_context, input_context->streams[i]); break; default: input_context->streams[i]->discard = AVDISCARD_ALL; break; } } if (av_set_parameters(output_context, NULL) < 0) { fprintf(stderr, "Segmenter error: Invalid output format parameters\n"); exit(1); } dump_format(output_context, 0, config.filename_prefix, 1); if(video_index >= 0) { AVCodec *codec = avcodec_find_decoder(video_stream->codec->codec_id); if (!codec) { fprintf(stderr, "Segmenter error: Could not find video decoder, key frames will not be honored\n"); } if (avcodec_open(video_stream->codec, codec) < 0) { fprintf(stderr, "Segmenter error: Could not open video decoder, key frames will not be honored\n"); } } unsigned int output_index = 1; snprintf(output_filename, strlen(config.temp_directory) + 1 + strlen(config.filename_prefix) + 10, "%s/%s-%05u.ts", config.temp_directory, config.filename_prefix, output_index++); if (url_fopen(&output_context->pb, output_filename, URL_WRONLY) < 0) { fprintf(stderr, "Segmenter error: Could not open '%s'\n", output_filename); exit(1); } if (av_write_header(output_context)) { fprintf(stderr, "Segmenter error: Could not write mpegts header to first output file\n"); exit(1); } unsigned int first_segment = 1; unsigned int last_segment = 0; double prev_segment_time = 0; int decode_done; do { double segment_time; AVPacket packet; decode_done = av_read_frame(input_context, &packet); if (decode_done < 0) { break; } if (av_dup_packet(&packet) < 0) { fprintf(stderr, "Segmenter error: Could not duplicate packet"); av_free_packet(&packet); break; } if (packet.stream_index == video_index && (packet.flags 
& PKT_FLAG_KEY)) { segment_time = (double)video_stream->pts.val * video_stream->time_base.num / video_stream->time_base.den; } else if (video_index < 0) { segment_time = (double)audio_stream->pts.val * audio_stream->time_base.num / audio_stream->time_base.den; } else { segment_time = prev_segment_time; } // done writing the current file? if (segment_time - prev_segment_time >= config.segment_length) { put_flush_packet(output_context->pb); url_fclose(output_context->pb); output_transfer_command(first_segment, ++last_segment, 0, config.encoding_profile); snprintf(output_filename, strlen(config.temp_directory) + 1 + strlen(config.filename_prefix) + 10, "%s/%s-%05u.ts", config.temp_directory, config.filename_prefix, output_index++); if (url_fopen(&output_context->pb, output_filename, URL_WRONLY) < 0) { fprintf(stderr, "Segmenter error: Could not open '%s'\n", output_filename); break; } prev_segment_time = segment_time; } ret = av_interleaved_write_frame(output_context, &packet); if (ret < 0) { fprintf(stderr, "Segmenter error: Could not write frame of stream: %d\n", ret); } else if (ret > 0) { fprintf(stderr, "Segmenter info: End of stream requested\n"); av_free_packet(&packet); break; } av_free_packet(&packet); } while (!decode_done); av_write_trailer(output_context); if (video_index >= 0) { avcodec_close(video_stream->codec); } for(i = 0; i < output_context->nb_streams; i++) { av_freep(&output_context->streams[i]->codec); av_freep(&output_context->streams[i]); } url_fclose(output_context->pb); av_free(output_context); output_transfer_command(first_segment, ++last_segment, 1, config.encoding_profile); return 0; }
void create_segments(struct segment_context * ctx) { long max_tsfiles = 0; char *max_tsfiles_check; double prev_segment_time = -1; unsigned int output_index = ctx->start_from; AVInputFormat *ifmt; AVOutputFormat *ofmt; AVFormatContext *ic = NULL; AVFormatContext *oc; AVStream *video_st = NULL; AVStream *audio_st = NULL;; AVCodec *codec; char *output_filename; char *output_format; int video_index; int audio_index; unsigned int first_segment = 1; unsigned int last_segment = 0; int decode_done; int ret; int i; int remove_file; int write_index = -1; av_register_all(); ifmt = av_find_input_format("mpegts"); if (!ifmt) { fprintf(stderr, "Could not find MPEG-TS demuxer\n"); exit(1); } ret = avformat_open_input(&ic, ctx->input, ifmt, NULL); if (ret != 0) { fprintf(stderr, "Could not open input file, make sure it is an mpegts file: %d\n", ret); exit(1); } if (avformat_find_stream_info(ic, NULL) < 0) { fprintf(stderr, "Could not read stream information\n"); exit(1); } ofmt = av_guess_format("mpegts", NULL, NULL); if (!ofmt) { fprintf(stderr, "Could not find MPEG-TS muxer\n"); exit(1); } write_index = index_file_open(ctx); if(write_index >= 0) index_file_write_headers(ctx); oc = avformat_alloc_context(); if (!oc) { fprintf(stderr, "Could not allocated output context"); exit(1); } oc->oformat = ofmt; video_index = -1; audio_index = -1; for (i = 0; i < ic->nb_streams && (video_index < 0 || audio_index < 0); i++) { switch (ic->streams[i]->codec->codec_type) { case AVMEDIA_TYPE_VIDEO: video_index = i; ic->streams[i]->discard = AVDISCARD_NONE; video_st = add_output_stream(oc, ic->streams[i]); break; case AVMEDIA_TYPE_AUDIO: audio_index = i; ic->streams[i]->discard = AVDISCARD_NONE; audio_st = add_output_stream(oc, ic->streams[i]); break; default: ic->streams[i]->discard = AVDISCARD_ALL; break; } } av_dump_format(oc, 0, ctx->output_prefix, 1); if(video_st) { codec = avcodec_find_decoder(video_st->codec->codec_id); if (!codec) { fprintf(stderr, "Could not find video decoder, key 
frames will not be honored\n"); } if (avcodec_open2(video_st->codec, codec, NULL) < 0) { fprintf(stderr, "Could not open video decoder, key frames will not be honored\n"); } } output_filename = malloc(sizeof(char) * (strlen(ctx->output_prefix) + 15)); if (!output_filename) { fprintf(stderr, "Could not allocate space for output filename\n"); exit(1); } output_format = malloc(sizeof(char) * (strlen(ctx->output_prefix) + 15)); if (!output_format) { fprintf(stderr, "Could not allocate space for output format\n"); exit(1); } sprintf(output_format, "%%s%s%%0%uu.ts", ctx->separator, ctx->precision); sprintf(output_filename, output_format, ctx->output_prefix, output_index++); if (avio_open(&oc->pb, output_filename, AVIO_FLAG_WRITE) < 0) { fprintf(stderr, "Could not open '%s'\n", output_filename); exit(1); } if (avformat_write_header(oc, NULL) < 0) { fprintf(stderr, "Could not write mpegts header to first output file\n"); exit(1); } AVPacket packet; do { double segment_time; decode_done = av_read_frame(ic, &packet); if (decode_done < 0) { break; } if (prev_segment_time < 0) { if (packet.stream_index == video_index) { prev_segment_time = packet.pts * av_q2d(video_st->time_base); } else { prev_segment_time = packet.pts * av_q2d(audio_st->time_base); } } if (av_dup_packet(&packet) < 0) { fprintf(stderr, "Could not duplicate packet"); av_free_packet(&packet); break; } if (packet.stream_index == video_index && (packet.flags & AV_PKT_FLAG_KEY)) { segment_time = packet.pts * av_q2d(video_st->time_base); } else if (video_index < 0) { segment_time = packet.pts * av_q2d(audio_st->time_base); } else { segment_time = prev_segment_time; } if (segment_time - prev_segment_time >= ctx->segment_duration) { avio_flush(oc->pb); avio_close(oc->pb); if(write_index >= 0) index_file_write_segment(ctx, floor(segment_time - prev_segment_time), output_filename); sprintf(output_filename, output_format, ctx->output_prefix, output_index++); if (avio_open(&oc->pb, output_filename, AVIO_FLAG_WRITE) < 0) 
{ fprintf(stderr, "Could not open '%s'\n", output_filename); break; } prev_segment_time = segment_time; } ret = av_write_frame(oc, &packet); if (ret < 0) { fprintf(stderr, "Warning: Could not write frame of stream\n"); break; } else if (ret > 0) { fprintf(stderr, "End of stream requested\n"); av_free_packet(&packet); break; } av_free_packet(&packet); } while (1); /* loop is exited on break */ double input_duration = (double)ic->duration / 1000000; if(write_index >= 0) index_file_write_segment(ctx, ceil(input_duration - prev_segment_time), output_filename); av_write_trailer(oc); if(video_st) avcodec_close(video_st->codec); for(i = 0; i < oc->nb_streams; i++) { av_freep(&oc->streams[i]->codec); av_freep(&oc->streams[i]); } avio_close(oc->pb); av_free(oc); if(write_index >= 0) index_file_close(ctx); }
/**
 * Open the ffmpeg output file and set up video/audio streams for rendering.
 *
 * Copies codec settings from rd->ffcodecdata into the context, guesses the
 * container from the configured type's first file extension, allocates the
 * muxer context and streams, opens the file and writes the stream header.
 *
 * \param context  ffmpeg state that receives the opened muxer/streams.
 * \param rd       render settings (codecs, bitrates, mux parameters).
 * \param rectx    render width in pixels.
 * \param recty    render height in pixels.
 * \param suffix   filename suffix (used e.g. for autosplit parts).
 * \param reports  error sink for user-visible messages.
 * \return 1 on success, 0 on failure (an error has been reported).
 */
static int start_ffmpeg_impl(FFMpegContext *context, struct RenderData *rd, int rectx, int recty, const char *suffix, ReportList *reports)
{
    /* Handle to the output file */
    AVFormatContext *of;
    AVOutputFormat *fmt;
    AVDictionary *opts = NULL;
    char name[256], error[1024];
    const char **exts;

    context->ffmpeg_type = rd->ffcodecdata.type;
    context->ffmpeg_codec = rd->ffcodecdata.codec;
    context->ffmpeg_audio_codec = rd->ffcodecdata.audio_codec;
    context->ffmpeg_video_bitrate = rd->ffcodecdata.video_bitrate;
    context->ffmpeg_audio_bitrate = rd->ffcodecdata.audio_bitrate;
    context->ffmpeg_gop_size = rd->ffcodecdata.gop_size;
    context->ffmpeg_autosplit = rd->ffcodecdata.flags & FFMPEG_AUTOSPLIT_OUTPUT;

    /* Determine the correct filename */
    ffmpeg_filepath_get(context, name, rd, context->ffmpeg_preview, suffix);
    PRINT("Starting output to %s(ffmpeg)...\n"
          " Using type=%d, codec=%d, audio_codec=%d,\n"
          " video_bitrate=%d, audio_bitrate=%d,\n"
          " gop_size=%d, autosplit=%d\n"
          " render width=%d, render height=%d\n",
          name, context->ffmpeg_type, context->ffmpeg_codec, context->ffmpeg_audio_codec,
          context->ffmpeg_video_bitrate, context->ffmpeg_audio_bitrate,
          context->ffmpeg_gop_size, context->ffmpeg_autosplit, rectx, recty);

    exts = get_file_extensions(context->ffmpeg_type);
    if (!exts) {
        BKE_report(reports, RPT_ERROR, "No valid formats found");
        return 0;
    }
    fmt = av_guess_format(NULL, exts[0], NULL);
    if (!fmt) {
        BKE_report(reports, RPT_ERROR, "No valid formats found");
        return 0;
    }

    of = avformat_alloc_context();
    if (!of) {
        BKE_report(reports, RPT_ERROR, "Error opening output file");
        return 0;
    }

    of->oformat = fmt;
    of->packet_size = rd->ffcodecdata.mux_packet_size;
    if (context->ffmpeg_audio_codec != AV_CODEC_ID_NONE) {
        ffmpeg_dict_set_int(&opts, "muxrate", rd->ffcodecdata.mux_rate);
    }
    else {
        av_dict_set(&opts, "muxrate", "0", 0);
    }

    ffmpeg_dict_set_int(&opts, "preload", (int)(0.5 * AV_TIME_BASE));

    of->max_delay = (int)(0.7 * AV_TIME_BASE);

    fmt->audio_codec = context->ffmpeg_audio_codec;

    BLI_strncpy(of->filename, name, sizeof(of->filename));
    /* set the codec to the user's selection */
    switch (context->ffmpeg_type) {
        case FFMPEG_AVI:
        case FFMPEG_MOV:
        case FFMPEG_MKV:
            fmt->video_codec = context->ffmpeg_codec;
            break;
        case FFMPEG_OGG:
            fmt->video_codec = AV_CODEC_ID_THEORA;
            break;
        case FFMPEG_DV:
            fmt->video_codec = AV_CODEC_ID_DVVIDEO;
            break;
        case FFMPEG_MPEG1:
            fmt->video_codec = AV_CODEC_ID_MPEG1VIDEO;
            break;
        case FFMPEG_MPEG2:
            fmt->video_codec = AV_CODEC_ID_MPEG2VIDEO;
            break;
        case FFMPEG_H264:
            fmt->video_codec = AV_CODEC_ID_H264;
            break;
        case FFMPEG_XVID:
            fmt->video_codec = AV_CODEC_ID_MPEG4;
            break;
        case FFMPEG_FLV:
            fmt->video_codec = AV_CODEC_ID_FLV1;
            break;
        case FFMPEG_MPEG4:
        default:
            fmt->video_codec = context->ffmpeg_codec;
            break;
    }

    /* DV has hard geometry constraints in ffmpeg's encoder. */
    if (fmt->video_codec == AV_CODEC_ID_DVVIDEO) {
        if (rectx != 720) {
            BKE_report(reports, RPT_ERROR, "Render width has to be 720 pixels for DV!");
            /* FIX: opts was leaked on this path; free it like the other error paths do. */
            av_dict_free(&opts);
            return 0;
        }
        if (rd->frs_sec != 25 && recty != 480) {
            BKE_report(reports, RPT_ERROR, "Render height has to be 480 pixels for DV-NTSC!");
            av_dict_free(&opts);  /* FIX: was leaked */
            return 0;
        }
        if (rd->frs_sec == 25 && recty != 576) {
            BKE_report(reports, RPT_ERROR, "Render height has to be 576 pixels for DV-PAL!");
            av_dict_free(&opts);  /* FIX: was leaked */
            return 0;
        }
    }

    if (context->ffmpeg_type == FFMPEG_DV) {
        fmt->audio_codec = AV_CODEC_ID_PCM_S16LE;
        if (context->ffmpeg_audio_codec != AV_CODEC_ID_NONE && rd->ffcodecdata.audio_mixrate != 48000 && rd->ffcodecdata.audio_channels != 2) {
            BKE_report(reports, RPT_ERROR, "FFMPEG only supports 48khz / stereo audio for DV!");
            av_dict_free(&opts);
            return 0;
        }
    }

    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        context->video_stream = alloc_video_stream(context, rd, fmt->video_codec, of, rectx, recty, error, sizeof(error));
        PRINT("alloc video stream %p\n", context->video_stream);
        if (!context->video_stream) {
            if (error[0])
                BKE_report(reports, RPT_ERROR, error);
            else
                BKE_report(reports, RPT_ERROR, "Error initializing video stream");
            av_dict_free(&opts);
            return 0;
        }
    }

    if (context->ffmpeg_audio_codec != AV_CODEC_ID_NONE) {
        context->audio_stream = alloc_audio_stream(context, rd, fmt->audio_codec, of, error, sizeof(error));
        if (!context->audio_stream) {
            if (error[0])
                BKE_report(reports, RPT_ERROR, error);
            else
                BKE_report(reports, RPT_ERROR, "Error initializing audio stream");
            av_dict_free(&opts);
            return 0;
        }
    }
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&of->pb, name, AVIO_FLAG_WRITE) < 0) {
            BKE_report(reports, RPT_ERROR, "Could not open file for writing");
            av_dict_free(&opts);
            return 0;
        }
    }
    if (avformat_write_header(of, NULL) < 0) {
        BKE_report(reports, RPT_ERROR, "Could not initialize streams, probably unsupported codec combination");
        av_dict_free(&opts);
        avio_close(of->pb);
        return 0;
    }

    context->outfile = of;
    av_dump_format(of, 0, name, 1);
    av_dict_free(&opts);

    return 1;
}
/**
 * @brief Main loop of the MP4 file output thread.
 *
 * Drains the shared frame queue, tracks a motion-alarm state machine
 * (IDLE -> ALARM -> ALERT -> IDLE) driven by incoming motion frames, and
 * writes H.264 video frames to an MP4 file that is opened when an alarm
 * begins (at the next key frame) and closed once the state returns to IDLE.
 *
 * @return 0 when the thread stops (mStop set).
 */
int Mp4FileOutput::run()
{
    //const int MAX_EVENT_HEAD_AGE = 2; ///< Number of seconds of video before event to save
    const int MAX_EVENT_TAIL_AGE = 3; ///< Number of seconds of video after event to save
    typedef enum { IDLE, PREALARM, ALARM, ALERT } AlarmState;
    if ( waitForProviders() )
    {
        /* auto detect the output format from the name. default is mpeg. */
        AVOutputFormat *outputFormat = av_guess_format( mExtension.c_str(), NULL, NULL );
        if ( !outputFormat )
            Fatal( "Could not deduce output format from '%s'", mExtension.c_str() );
        //AVFormatContext *outputContext = openFile( outputFormat );
        AVFormatContext *outputContext = NULL; // opened lazily on first key frame after an alarm
        double videoTimeOffset = 0.0L;
        uint64_t videoFrameCount = 0;
        AlarmState alarmState = IDLE;
        uint64_t alarmTime = 0;
        int eventCount = 0;
        while( !mStop )
        {
            while( !mStop )
            {
                // Queue is shared with the provider thread; hold the lock while draining it.
                mQueueMutex.lock();
                if ( !mFrameQueue.empty() )
                {
                    for ( FrameQueue::iterator iter = mFrameQueue.begin(); iter != mFrameQueue.end(); iter++ )
                    {
                        const FeedFrame *frame = iter->get();
                        Debug( 3, "Frame type %d", frame->mediaType() );
                        if ( frame->mediaType() == FeedFrame::FRAME_TYPE_VIDEO )
                        {
                            // This is an alarm detection frame
                            const MotionFrame *motionFrame = dynamic_cast<const MotionFrame *>(frame);
                            //const VideoProvider *provider = dynamic_cast<const VideoProvider *>(frame->provider());
                            AlarmState lastAlarmState = alarmState;
                            uint64_t now = time64();
                            Debug( 3, "Motion frame, alarmed %d", motionFrame->alarmed() );
                            if ( motionFrame->alarmed() )
                            {
                                alarmState = ALARM;
                                alarmTime = now;
                                if ( lastAlarmState == IDLE )
                                {
                                    // Create new event: save a snapshot JPEG of the triggering frame.
                                    eventCount++;
                                    std::string path = stringtf( "%s/img-%s-%d-%ju.jpg", mLocation.c_str(), mName.c_str(), eventCount, motionFrame->id() );
                                    //Info( "PF:%d @ %dx%d", motionFrame->pixelFormat(), motionFrame->width(), motionFrame->height() );
                                    Image image( motionFrame->pixelFormat(), motionFrame->width(), motionFrame->height(), motionFrame->buffer().data() );
                                    image.writeJpeg( path.c_str() );
                                }
                            }
                            else if ( lastAlarmState == ALARM )
                            {
                                // Alarm just cleared; enter the tail-recording period.
                                alarmState = ALERT;
                            }
                            else if ( lastAlarmState == ALERT )
                            {
                                // Stay in ALERT until the tail age is exceeded, then go idle.
                                Debug( 3, "Frame age %.2lf", frame->age( alarmTime ) );
                                if ( (0.0l-frame->age( alarmTime )) > MAX_EVENT_TAIL_AGE )
                                    alarmState = IDLE;
                            }
                            else
                            {
                                alarmState = IDLE;
                            }
                            Debug( 3, "Alarm state %d (%d)", alarmState, lastAlarmState );
                        }
                        else
                        {
                            // Encoded video frame: scan NAL units to detect an IDR (key) frame.
                            bool keyFrame = false;
                            const uint8_t *startPos = h264StartCode( frame->buffer().head(), frame->buffer().tail() );
                            while ( startPos < frame->buffer().tail() )
                            {
                                // Skip the zero bytes of the start code itself.
                                while( !*(startPos++) )
                                    ;
                                const uint8_t *nextStartPos = h264StartCode( startPos, frame->buffer().tail() );
                                int frameSize = nextStartPos-startPos;
                                unsigned char type = startPos[0] & 0x1F;
                                unsigned char nri = startPos[0] & 0x60;
                                Debug( 3, "Frame Type %d, NRI %d (%02x), %d bytes, ts %jd", type, nri>>5, startPos[0], frameSize, frame->timestamp() );
                                if ( type == NAL_IDR_SLICE )
                                    keyFrame = true;
                                startPos = nextStartPos;
                            }
                            videoTimeOffset += (double)mVideoParms.frameRate().num / mVideoParms.frameRate().den;
                            if ( keyFrame )
                            {
                                // We can do file opening/closing now (files must start on a key frame).
                                if ( alarmState != IDLE && !outputContext )
                                {
                                    outputContext = openFile( outputFormat );
                                    videoTimeOffset = 0.0L;
                                    videoFrameCount = 0;
                                }
                                else if ( alarmState == IDLE && outputContext )
                                {
                                    closeFile( outputContext );
                                    outputContext = NULL;
                                }
                            }
                            /*if ( keyFrame && (videoTimeOffset >= mMaxLength) ) { closeFile( outputContext ); outputContext = openFile( outputFormat ); videoTimeOffset = 0.0L; videoFrameCount = 0; }*/
                            if ( outputContext )
                            {
                                AVStream *videoStream = outputContext->streams[0];
                                AVCodecContext *videoCodecContext = videoStream->codec;
                                AVPacket packet;
                                av_init_packet(&packet);
                                packet.flags |= keyFrame ? AV_PKT_FLAG_KEY : 0;
                                packet.stream_index = videoStream->index;
                                // Packet borrows the frame buffer; it is only valid until the queue is cleared.
                                packet.data = (uint8_t*)frame->buffer().data();
                                packet.size = frame->buffer().size();
                                //packet.pts = packet.dts = AV_NOPTS_VALUE;
                                // Rescale the frame counter from the source frame rate to the codec time base.
                                packet.pts = packet.dts = (videoFrameCount * mVideoParms.frameRate().num * videoCodecContext->time_base.den) / (mVideoParms.frameRate().den * videoCodecContext->time_base.num);
                                Info( "vfc: %ju, vto: %.2lf, kf: %d, pts: %jd", videoFrameCount, videoTimeOffset, keyFrame, packet.pts );
                                int result = av_interleaved_write_frame(outputContext, &packet);
                                if ( result != 0 )
                                    Fatal( "Error while writing video frame: %d", result );
                            }
                            videoFrameCount++;
                        }
                    }
                    mFrameQueue.clear();
                }
                mQueueMutex.unlock();
                checkProviders();
                usleep( INTERFRAME_TIMEOUT );
            }
        }
        if ( outputContext )
            closeFile( outputContext );
    }
    cleanup();
    return 0;
}
/**
 * Initialize the writer: guess the container format, look up the configured
 * video/audio encoders, allocate the AVFormatContext, create the streams and
 * open their codecs. Must be called before any frames are written.
 *
 * Reads m_container, m_videoCodec, m_audioCodec, m_filename, m_width and
 * m_height; a zero width/height disables the video stream entirely.
 *
 * @return true on success, false on any setup failure (logged).
 */
bool AVFormatWriter::Init(void)
{
    if (m_videoOutBuf)
        delete [] m_videoOutBuf;

    // Worst-case encode buffer: 2 bytes/pixel plus a little slack.
    if (m_width && m_height)
        m_videoOutBuf = new unsigned char[m_width * m_height * 2 + 10];

    AVOutputFormat *fmt = av_guess_format(m_container.toAscii().constData(),
                                          NULL, NULL);
    if (!fmt)
    {
        LOG(VB_RECORD, LOG_ERR, LOC +
            QString("Init(): Unable to guess AVOutputFormat from container %1")
            .arg(m_container));
        return false;
    }

    // Keep a private copy so the codec ids below don't mutate global state.
    m_fmt = *fmt;

    if (m_width && m_height)
    {
        m_avVideoCodec = avcodec_find_encoder_by_name(
            m_videoCodec.toAscii().constData());
        if (!m_avVideoCodec)
        {
            LOG(VB_RECORD, LOG_ERR, LOC +
                QString("Init(): Unable to find video codec %1").arg(m_videoCodec));
            return false;
        }

        m_fmt.video_codec = m_avVideoCodec->id;
    }
    else
        m_fmt.video_codec = CODEC_ID_NONE;

    m_avAudioCodec = avcodec_find_encoder_by_name(
        m_audioCodec.toAscii().constData());
    if (!m_avAudioCodec)
    {
        LOG(VB_RECORD, LOG_ERR, LOC +
            QString("Init(): Unable to find audio codec %1").arg(m_audioCodec));
        return false;
    }

    m_fmt.audio_codec = m_avAudioCodec->id;

    m_ctx = avformat_alloc_context();
    if (!m_ctx)
    {
        LOG(VB_RECORD, LOG_ERR,
            LOC + "Init(): Unable to allocate AVFormatContext");
        return false;
    }

    m_ctx->oformat = &m_fmt;

    if (m_container == "mpegts")
        m_ctx->packet_size = 2324; // standard MPEG-TS packet payload size used here

    snprintf(m_ctx->filename, sizeof(m_ctx->filename), "%s",
             m_filename.toAscii().constData());

    if (m_fmt.video_codec != CODEC_ID_NONE)
        m_videoStream = AddVideoStream();
    if (m_fmt.audio_codec != CODEC_ID_NONE)
        m_audioStream = AddAudioStream();

    // NOTE(review): plain `new` throws std::bad_alloc rather than returning
    // NULL, so these two null checks are effectively dead code; kept as-is.
    m_pkt = new AVPacket;
    if (!m_pkt)
    {
        LOG(VB_RECORD, LOG_ERR, LOC + "Init(): error allocating AVPacket");
        return false;
    }
    av_new_packet(m_pkt, m_ctx->packet_size);

    m_audPkt = new AVPacket;
    if (!m_audPkt)
    {
        LOG(VB_RECORD, LOG_ERR, LOC + "Init(): error allocating AVPacket");
        return false;
    }
    av_new_packet(m_audPkt, m_ctx->packet_size);

    if ((m_videoStream) && (!OpenVideo()))
    {
        LOG(VB_RECORD, LOG_ERR, LOC + "Init(): OpenVideo() failed");
        return false;
    }

    if ((m_audioStream) && (!OpenAudio()))
    {
        LOG(VB_RECORD, LOG_ERR, LOC + "Init(): OpenAudio() failed");
        return false;
    }

    return true;
}
/**
 * Determine the output format.
 *
 * Asks FFmpeg to guess a muxer from the short name, the filename extension
 * and/or the MIME type, and remembers the result in myFormat.
 *
 * @param theShortName short format name (may be NULL)
 * @param theFilename  filename used for extension matching (may be NULL)
 * @param theMimeType  MIME type hint (may be NULL)
 * @return true when a matching format was found
 */
bool findFormat(const char* theShortName, const char* theFilename, const char* theMimeType = NULL)
{
    myFormat = av_guess_format(theShortName, theFilename, theMimeType);
    if (myFormat == NULL) {
        return false;
    }
    return true;
}
/**
 * Create and partially initialize the encoding context: resolve the output
 * muxer, pick video and audio encoders (from comma-separated user lists or
 * by guessing from the format), and copy user format options.
 *
 * Returns the new context on success, or NULL after calling
 * encode_lavc_fail() when no usable format or codec was found.
 */
struct encode_lavc_context *encode_lavc_init(struct encode_opts *options,
                                             struct mpv_global *global)
{
    struct encode_lavc_context *ctx;
    const char *filename = options->file;

    // STUPID STUPID STUPID STUPID avio
    // does not support "-" as file name to mean stdin/stdout
    // ffmpeg.c works around this too, the same way
    if (!strcmp(filename, "-"))
        filename = "pipe:1";

    // Writing encoded output to stdout: keep log messages off stdout.
    if (filename && (
            !strcmp(filename, "/dev/stdout") ||
            !strcmp(filename, "pipe:") ||
            !strcmp(filename, "pipe:1")))
        mp_msg_force_stderr(global, true);

    ctx = talloc_zero(NULL, struct encode_lavc_context);
    pthread_mutex_init(&ctx->lock, NULL);
    ctx->log = mp_log_new(ctx, global->log, "encode-lavc");
    ctx->global = global;
    encode_lavc_discontinuity(ctx);
    ctx->options = options;

    ctx->avc = avformat_alloc_context();

    // Try each comma-separated format name until one matches.
    if (ctx->options->format) {
        char *tok;
        const char *in = ctx->options->format;
        while (*in) {
            tok = av_get_token(&in, ",");
            ctx->avc->oformat = av_guess_format(tok, filename, NULL);
            av_free(tok);
            if (ctx->avc->oformat)
                break;
            if (*in)
                ++in; // skip the ',' delimiter av_get_token stopped at
        }
    } else
        ctx->avc->oformat = av_guess_format(NULL, filename, NULL);

    if (!ctx->avc->oformat) {
        encode_lavc_fail(ctx, "format not found\n");
        return NULL;
    }

    av_strlcpy(ctx->avc->filename, filename,
               sizeof(ctx->avc->filename));

    ctx->foptions = NULL;
    if (ctx->options->fopts) {
        char **p;
        for (p = ctx->options->fopts; *p; ++p) {
            if (!set_to_avdictionary(ctx, &ctx->foptions, NULL, *p))
                MP_WARN(ctx, "could not set option %s\n", *p);
        }
    }

    // Video encoder: first usable name from the user list, else guess from format.
    if (ctx->options->vcodec) {
        char *tok;
        const char *in = ctx->options->vcodec;
        while (*in) {
            tok = av_get_token(&in, ",");
            ctx->vc = avcodec_find_encoder_by_name(tok);
            av_free(tok);
            // Reject a name that resolved to a non-video encoder.
            if (ctx->vc && ctx->vc->type != AVMEDIA_TYPE_VIDEO)
                ctx->vc = NULL;
            if (ctx->vc)
                break;
            if (*in)
                ++in;
        }
    } else
        ctx->vc = avcodec_find_encoder(av_guess_codec(ctx->avc->oformat, NULL,
                                                      ctx->avc->filename, NULL,
                                                      AVMEDIA_TYPE_VIDEO));

    // Audio encoder: same selection scheme as video above.
    if (ctx->options->acodec) {
        char *tok;
        const char *in = ctx->options->acodec;
        while (*in) {
            tok = av_get_token(&in, ",");
            ctx->ac = avcodec_find_encoder_by_name(tok);
            av_free(tok);
            if (ctx->ac && ctx->ac->type != AVMEDIA_TYPE_AUDIO)
                ctx->ac = NULL;
            if (ctx->ac)
                break;
            if (*in)
                ++in;
        }
    } else
        ctx->ac = avcodec_find_encoder(av_guess_codec(ctx->avc->oformat, NULL,
                                                      ctx->avc->filename, NULL,
                                                      AVMEDIA_TYPE_AUDIO));

    if (!ctx->vc && !ctx->ac) {
        encode_lavc_fail(
            ctx, "neither audio nor video codec was found\n");
        return NULL;
    }

    /* taken from ffmpeg unchanged
     * TODO turn this into an option if anyone needs this */
    ctx->avc->max_delay = 0.7 * AV_TIME_BASE;

    ctx->abytes = 0;
    ctx->vbytes = 0;
    ctx->frames = 0;

    if (options->video_first)
        ctx->video_first = true;
    if (options->audio_first)
        ctx->audio_first = true;

    return ctx;
}
/// Create a video writer object that uses FFMPEG.
///
/// Guesses the container from the filename, maps the fourcc to a codec id,
/// allocates the format context and video stream, opens the encoder, and
/// writes the container header. The many #if blocks select the correct API
/// for the libavformat/libavcodec version being built against.
///
/// @param filename output path (container guessed from its extension)
/// @param fourcc   codec fourcc code selected by the caller
/// @param fps      frames per second (must be > 0)
/// @param width    frame width in pixels (truncated to even)
/// @param height   frame height in pixels (truncated to even)
/// @param is_color true for BGR24 input frames, false for GRAY8
/// @return true on success; false on any failure (state partially set up —
///         callers are expected to call close()).
inline bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
                                        double fps, int width, int height, bool is_color )
{
    icvInitFFMPEG_internal();

    CodecID codec_id = CODEC_ID_NONE;
    int err, codec_pix_fmt;
    double bitrate_scale = 1;

    close();

    // check arguments
    if( !filename )
        return false;
    if(fps <= 0)
        return false;

    // we allow frames of odd width or height, but in this case we truncate
    // the rightmost column/the bottom row. Probably, this should be handled more elegantly,
    // but some internal functions inside FFMPEG swscale require even width/height.
    width &= -2;
    height &= -2;
    if( width <= 0 || height <= 0 )
        return false;

    /* auto detect the output format from the name and fourcc code. */
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
    fmt = av_guess_format(NULL, filename, NULL);
#else
    fmt = guess_format(NULL, filename, NULL);
#endif

    if (!fmt)
        return false;

    /* determine optimal pixel format */
    if (is_color) {
        input_pix_fmt = PIX_FMT_BGR24;
    }
    else {
        input_pix_fmt = PIX_FMT_GRAY8;
    }

    /* Lookup codec_id for given fourcc */
#if LIBAVCODEC_VERSION_INT<((51<<16)+(49<<8)+0)
    if( (codec_id = codec_get_bmp_id( fourcc )) == CODEC_ID_NONE )
        return false;
#else
    const struct AVCodecTag * tags[] = { codec_bmp_tags, NULL};
    if( (codec_id = av_codec_get_id(tags, fourcc)) == CODEC_ID_NONE )
        return false;
#endif

    // alloc memory for context
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
    oc = avformat_alloc_context();
#else
    oc = av_alloc_format_context();
#endif
    assert (oc);

    /* set file name */
    oc->oformat = fmt;
    _snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* set some options */
    oc->max_delay = (int)(0.7*AV_TIME_BASE); /* This reduces buffer underrun warnings with MPEG */

    // set a few optimal pixel formats for lossless codecs of interest..
    switch (codec_id) {
#if LIBAVCODEC_VERSION_INT>((50<<16)+(1<<8)+0)
    case CODEC_ID_JPEGLS:
        // BGR24 or GRAY8 depending on is_color...
        codec_pix_fmt = input_pix_fmt;
        break;
#endif
    case CODEC_ID_HUFFYUV:
        codec_pix_fmt = PIX_FMT_YUV422P;
        break;
    case CODEC_ID_MJPEG:
    case CODEC_ID_LJPEG:
        codec_pix_fmt = PIX_FMT_YUVJ420P;
        bitrate_scale = 3; // JPEG-family codecs need a higher bitrate for comparable quality
        break;
    case CODEC_ID_RAWVIDEO:
        codec_pix_fmt = input_pix_fmt == PIX_FMT_GRAY8 ||
                        input_pix_fmt == PIX_FMT_GRAY16LE ||
                        input_pix_fmt == PIX_FMT_GRAY16BE ? input_pix_fmt : PIX_FMT_YUV420P;
        break;
    default:
        // good for lossy formats, MPEG, etc.
        codec_pix_fmt = PIX_FMT_YUV420P;
        break;
    }

    // Bitrate heuristic scaled by frame size/rate, clamped to avoid int overflow.
    double bitrate = MIN(bitrate_scale*fps*width*height, (double)INT_MAX/2);

    // TODO -- safe to ignore output audio stream?
    video_st = icv_add_video_stream_FFMPEG(oc, codec_id, width, height, (int)(bitrate + 0.5),
                                           fps, codec_pix_fmt);

    /* set the output parameters (must be done even if no
       parameters). */
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 2, 0)
    if (av_set_parameters(oc, NULL) < 0) {
        return false;
    }
#endif

#if 0
#if FF_API_DUMP_FORMAT
    dump_format(oc, 0, filename, 1);
#else
    av_dump_format(oc, 0, filename, 1);
#endif
#endif

    /* now that all the parameters are set, we can open the audio and
       video codecs and allocate the necessary encode buffers */
    if (!video_st){
        return false;
    }

    AVCodec *codec;
    AVCodecContext *c;

#if LIBAVFORMAT_BUILD > 4628
    c = (video_st->codec);
#else
    c = &(video_st->codec);
#endif

    c->codec_tag = fourcc;
    /* find the video encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "Could not find encoder for codec id %d: %s", c->codec_id, icvFFMPEGErrStr(
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
                AVERROR_ENCODER_NOT_FOUND
#else
                -1
#endif
                ));
        return false;
    }

    // Fold the computed bitrate into the stream defaults, clamped to int range.
    s64 lbit_rate = (s64)c->bit_rate;
    lbit_rate += (bitrate / 2);
    lbit_rate = std::min(lbit_rate, (s64)INT_MAX);
    c->bit_rate_tolerance = (int)lbit_rate;
    c->bit_rate = (int)lbit_rate;

    /* open the codec */
    if ((err=
#if LIBAVCODEC_VERSION_INT >= ((53<<16)+(8<<8)+0)
         avcodec_open2(c, codec, NULL)
#else
         avcodec_open(c, codec)
#endif
         ) < 0) {
        fprintf(stderr, "Could not open codec '%s': %s", codec->name, icvFFMPEGErrStr(err));
        return false;
    }

    outbuf = NULL;

    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* allocate output buffer */
        /* assume we will never get codec output with more than 4 bytes per pixel... */
        outbuf_size = width*height*4;
        outbuf = (uint8_t *) av_malloc(outbuf_size);
    }

    bool need_color_convert;
    need_color_convert = (c->pix_fmt != input_pix_fmt);

    /* allocate the encoded raw picture */
    picture = icv_alloc_picture_FFMPEG(c->pix_fmt, c->width, c->height, need_color_convert);
    if (!picture) {
        return false;
    }

    /* if the output format is not our input format, then a temporary
       picture of the input format is needed too. It is then converted
       to the required output format */
    input_picture = NULL;
    if ( need_color_convert ) {
        input_picture = icv_alloc_picture_FFMPEG(input_pix_fmt, c->width, c->height, false);
        if (!input_picture) {
            return false;
        }
    }

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 2, 0)
        if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0)
#else
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0)
#endif
        {
            return false;
        }
    }

#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(52, 111, 0)
    /* write the stream header, if any */
    err=avformat_write_header(oc, NULL);
#else
    err=av_write_header( oc );
#endif

    if(err < 0)
    {
        close();
        remove(filename); // don't leave a broken/empty file behind
        return false;
    }

    frame_width = width;
    frame_height = height;
    ok = true;

    return true;
}
/**
 * Initialize S/PDIF passthrough "decoding": set up a libavformat spdif muxer
 * that wraps compressed audio packets in IEC 61937 frames, delivered through
 * the write_packet avio callback.
 *
 * Configures the output channel count, sample format and rate per codec and
 * writes the spdif stream header. Returns 1 on success, 0 on failure (after
 * cleaning up via uninit()).
 */
static int init(struct dec_audio *da, const char *decoder)
{
    struct spdifContext *spdif_ctx = talloc_zero(NULL, struct spdifContext);
    da->priv = spdif_ctx;
    spdif_ctx->log = da->log;

    AVFormatContext *lavf_ctx  = avformat_alloc_context();
    if (!lavf_ctx)
        goto fail;

    lavf_ctx->oformat = av_guess_format("spdif", NULL, NULL);
    if (!lavf_ctx->oformat)
        goto fail;

    spdif_ctx->lavf_ctx = lavf_ctx;

    void *buffer = av_mallocz(OUTBUF_SIZE);
    if (!buffer)
        abort();
    // The avio context takes ownership of `buffer` on success.
    lavf_ctx->pb = avio_alloc_context(buffer, OUTBUF_SIZE, 1, spdif_ctx, NULL,
                                      write_packet, NULL);
    if (!lavf_ctx->pb) {
        av_free(buffer);
        goto fail;
    }

    // Request minimal buffering (not available on Libav)
#if LIBAVFORMAT_VERSION_MICRO >= 100
    lavf_ctx->pb->direct = 1;
#endif

    AVStream *stream = avformat_new_stream(lavf_ctx, 0);
    if (!stream)
        goto fail;

    stream->codec->codec_id = mp_codec_to_av_codec_id(decoder);

    AVDictionary *format_opts = NULL;

    // Per-codec IEC 61937 framing parameters and the format presented to the
    // audio output. The fixed packet sizes/rates follow the spdif muxer.
    int num_channels = 0;
    int sample_format = 0;
    int samplerate = 0;
    switch (stream->codec->codec_id) {
    case AV_CODEC_ID_AAC:
        spdif_ctx->iec61937_packet_size = 16384;
        sample_format                   = AF_FORMAT_IEC61937_LE;
        samplerate                      = 48000;
        num_channels                    = 2;
        break;
    case AV_CODEC_ID_AC3:
        spdif_ctx->iec61937_packet_size = 6144;
        sample_format                   = AF_FORMAT_AC3_LE;
        samplerate                      = 48000;
        num_channels                    = 2;
        break;
    case AV_CODEC_ID_DTS:
        if (da->opts->dtshd) {
            av_dict_set(&format_opts, "dtshd_rate", "768000", 0); // 4*192000
            spdif_ctx->iec61937_packet_size = 32768;
            sample_format                   = AF_FORMAT_IEC61937_LE;
            samplerate                      = 192000;
            num_channels                    = 2*4;
        } else {
            spdif_ctx->iec61937_packet_size = 32768;
            sample_format                   = AF_FORMAT_AC3_LE;
            samplerate                      = 48000;
            num_channels                    = 2;
        }
        break;
    case AV_CODEC_ID_EAC3:
        spdif_ctx->iec61937_packet_size = 24576;
        sample_format                   = AF_FORMAT_IEC61937_LE;
        samplerate                      = 192000;
        num_channels                    = 2;
        break;
    case AV_CODEC_ID_MP3:
        spdif_ctx->iec61937_packet_size = 4608;
        sample_format                   = AF_FORMAT_MPEG2;
        samplerate                      = 48000;
        num_channels                    = 2;
        break;
    case AV_CODEC_ID_TRUEHD:
        spdif_ctx->iec61937_packet_size = 61440;
        sample_format                   = AF_FORMAT_IEC61937_LE;
        samplerate                      = 192000;
        num_channels                    = 8;
        break;
    default:
        abort();
    }
    mp_audio_set_num_channels(&da->decoded, num_channels);
    mp_audio_set_format(&da->decoded, sample_format);
    da->decoded.rate = samplerate;

    if (avformat_write_header(lavf_ctx, &format_opts) < 0) {
        MP_FATAL(da, "libavformat spdif initialization failed.\n");
        av_dict_free(&format_opts);
        goto fail;
    }
    av_dict_free(&format_opts);

    spdif_ctx->need_close = true;

    return 1;

fail:
    uninit(da);
    return 0;
}
/**
 * Set up a remuxing destination for a stream: pick the muxer matching the
 * destination codec, create the output stream and metadata, copy the encoder
 * parameters, and attach a custom avio context that delivers the muxed bytes
 * through the caller's write callback.
 *
 * @param stream stream whose dst_ctx/dst_iobuf/dst_ioctx are filled in
 * @param write  callback invoked with each muxed buffer
 * @param opaque user pointer passed through to the write callback
 * @return true on success, false on failure (everything allocated here is
 *         released again on the failure paths)
 */
bool stream_remux(stream_t *stream, int (*write)(void *opaque, uint8_t *buf, int buf_size), void *opaque)
{
  const char *format_name;
  AVOutputFormat *dst_format;
  AVFormatContext *dst_ctx;
  AVStream *dst_stream;
  uint8_t *dst_iobuf;
  AVIOContext *dst_ioctx;

  /* Map the destination codec type to a container/muxer name. */
  if (stream->dst_codec_type == CODEC_TYPE_MP3) {
    format_name = "mp3";
  } else if (stream->dst_codec_type == CODEC_TYPE_OGG_VORBIS) {
    format_name = "ogg";
  } else if (stream->dst_codec_type == CODEC_TYPE_FLAC) {
    format_name = "flac";
  } else if (stream->dst_codec_type == CODEC_TYPE_AAC) {
    format_name = "aac";
  } else if (stream->dst_codec_type == CODEC_TYPE_OPUS) {
    format_name = "opus";
  } else {
    return false;
  }

  dst_format = av_guess_format(format_name, NULL, NULL);
  if (!dst_format) {
    musicd_log(LOG_ERROR, "stream", "can't find encoder for %s",format_name);
    return false;
  }

  dst_ctx = avformat_alloc_context();
  /* FIX: avformat_alloc_context() can return NULL; it was dereferenced
   * unchecked while every other allocation in this function is checked. */
  if (!dst_ctx) {
    musicd_log(LOG_ERROR, "stream", "avformat_alloc_context failed");
    return false;
  }

  dst_ctx->oformat = dst_format;

  dst_stream = avformat_new_stream(dst_ctx, NULL);
  if (!dst_stream) {
    musicd_log(LOG_ERROR, "stream", "avformat_new_stream failed");
    avformat_free_context(dst_ctx);
    return false;
  }

  av_dict_set(&dst_ctx->metadata, "track",
              stringf("%02d", stream->track->track), AV_DICT_DONT_STRDUP_VAL);
  av_dict_set(&dst_ctx->metadata, "title", stream->track->title, 0);
  av_dict_set(&dst_ctx->metadata, "artist", stream->track->artist, 0);
  av_dict_set(&dst_ctx->metadata, "album", stream->track->album, 0);

  avcodec_copy_context(dst_stream->codec, stream->encoder);

  dst_iobuf = av_mallocz(4096);
  /* FIX: check the io buffer allocation before handing it to avio. */
  if (!dst_iobuf) {
    musicd_log(LOG_ERROR, "stream", "av_mallocz failed");
    avformat_free_context(dst_ctx);
    return false;
  }
  dst_ioctx =
    avio_alloc_context(dst_iobuf, 4096, 1, opaque, NULL, write, NULL);
  if (!dst_ioctx) {
    musicd_log(LOG_ERROR, "stream", "avio_alloc_context failed");
    av_free(dst_iobuf);
    avformat_free_context(dst_ctx);
    return false;
  }
  dst_ctx->pb = dst_ioctx;
  stream->dst_ctx = dst_ctx;
  stream->dst_iobuf = dst_iobuf;
  stream->dst_ioctx = dst_ioctx;
  return true;
}
glw_rec_t * glw_rec_init(const char *filename, int width, int height, int fps) { extern int concurrency; AVCodec *c; struct glw_rec *gr = calloc(1, sizeof(glw_rec_t)); gr->width = width; gr->height = height; gr->fps = fps; gr->fmt = av_guess_format(NULL, filename, NULL); if(gr->fmt == NULL) { TRACE(TRACE_ERROR, "GLWREC", "Unable to record to %s -- Unknown file format", filename); return NULL; } gr->oc = avformat_alloc_context(); gr->oc->oformat = gr->fmt; snprintf(gr->oc->filename, sizeof(gr->oc->filename), "%s", filename); gr->v_st = av_new_stream(gr->oc, 0); gr->v_ctx = gr->v_st->codec; gr->v_ctx->codec_type = AVMEDIA_TYPE_VIDEO; gr->v_ctx->codec_id = CODEC_ID_FFVHUFF; gr->v_ctx->width = width; gr->v_ctx->height = height; gr->v_ctx->time_base.den = fps; gr->v_ctx->time_base.num = 1; gr->v_ctx->pix_fmt = PIX_FMT_RGB32; gr->v_ctx->coder_type = 1; if(av_set_parameters(gr->oc, NULL) < 0) { TRACE(TRACE_ERROR, "GLWREC", "Unable to record to %s -- Invalid output format parameters", filename); return NULL; } dump_format(gr->oc, 0, filename, 1); c = avcodec_find_encoder(gr->v_ctx->codec_id); if(avcodec_open(gr->v_ctx, c)) { TRACE(TRACE_ERROR, "GLWREC", "Unable to record to %s -- Unable to open video codec", filename); return NULL; } gr->v_ctx->thread_count = concurrency; if(url_fopen(&gr->oc->pb, filename, URL_WRONLY) < 0) { TRACE(TRACE_ERROR, "GLWREC", "Unable to record to %s -- Unable to open file for writing", filename); return NULL; } /* write the stream header, if any */ av_write_header(gr->oc); gr->vbuf_size = 2000000; gr->vbuf_ptr = av_malloc(gr->vbuf_size); return gr; }
/**
 * Open the ffmpeg output file and set up video/audio streams for rendering
 * (legacy, global-state variant).
 *
 * Copies codec settings from rd->ffcodecdata into module globals, guesses
 * the container from the configured type's first file extension, allocates
 * the muxer context and streams, opens the file and writes the header.
 *
 * Returns 1 on success, 0 on failure (an error has been reported).
 */
static int start_ffmpeg_impl(struct RenderData *rd, int rectx, int recty, ReportList *reports)
{
	/* Handle to the output file */
	AVFormatContext* of;
	AVOutputFormat* fmt;
	char name[256];
	const char ** exts;

	ffmpeg_type = rd->ffcodecdata.type;
	ffmpeg_codec = rd->ffcodecdata.codec;
	ffmpeg_audio_codec = rd->ffcodecdata.audio_codec;
	ffmpeg_video_bitrate = rd->ffcodecdata.video_bitrate;
	ffmpeg_audio_bitrate = rd->ffcodecdata.audio_bitrate;
	ffmpeg_gop_size = rd->ffcodecdata.gop_size;
	ffmpeg_autosplit = rd->ffcodecdata.flags & FFMPEG_AUTOSPLIT_OUTPUT;

	do_init_ffmpeg();

	/* Determine the correct filename */
	filepath_ffmpeg(name, rd);
	fprintf(stderr, "Starting output to %s(ffmpeg)...\n"
		" Using type=%d, codec=%d, audio_codec=%d,\n"
		" video_bitrate=%d, audio_bitrate=%d,\n"
		" gop_size=%d, autosplit=%d\n"
		" render width=%d, render height=%d\n",
		name, ffmpeg_type, ffmpeg_codec, ffmpeg_audio_codec,
		ffmpeg_video_bitrate, ffmpeg_audio_bitrate,
		ffmpeg_gop_size, ffmpeg_autosplit,
		rectx, recty);

	exts = get_file_extensions(ffmpeg_type);
	if (!exts) {
		BKE_report(reports, RPT_ERROR, "No valid formats found.");
		return 0;
	}
	fmt = av_guess_format(NULL, exts[0], NULL);
	if (!fmt) {
		BKE_report(reports, RPT_ERROR, "No valid formats found.");
		return 0;
	}

	of = avformat_alloc_context();
	if (!of) {
		BKE_report(reports, RPT_ERROR, "Error opening output file");
		return 0;
	}

	of->oformat = fmt;
	of->packet_size= rd->ffcodecdata.mux_packet_size;
	/* mux_rate only applies when audio is being muxed in. */
	if (ffmpeg_audio_codec != CODEC_ID_NONE) {
		of->mux_rate = rd->ffcodecdata.mux_rate;
	}
	else {
		of->mux_rate = 0;
	}

	of->preload = (int)(0.5*AV_TIME_BASE);
	of->max_delay = (int)(0.7*AV_TIME_BASE);

	fmt->audio_codec = ffmpeg_audio_codec;

	BLI_snprintf(of->filename, sizeof(of->filename), "%s", name);
	/* set the codec to the user's selection */
	switch(ffmpeg_type) {
	case FFMPEG_AVI:
	case FFMPEG_MOV:
	case FFMPEG_MKV:
		fmt->video_codec = ffmpeg_codec;
		break;
	case FFMPEG_OGG:
		fmt->video_codec = CODEC_ID_THEORA;
		break;
	case FFMPEG_DV:
		fmt->video_codec = CODEC_ID_DVVIDEO;
		break;
	case FFMPEG_MPEG1:
		fmt->video_codec = CODEC_ID_MPEG1VIDEO;
		break;
	case FFMPEG_MPEG2:
		fmt->video_codec = CODEC_ID_MPEG2VIDEO;
		break;
	case FFMPEG_H264:
		fmt->video_codec = CODEC_ID_H264;
		break;
	case FFMPEG_XVID:
		fmt->video_codec = CODEC_ID_MPEG4;
		break;
	case FFMPEG_FLV:
		fmt->video_codec = CODEC_ID_FLV1;
		break;
	case FFMPEG_MP3:
		fmt->audio_codec = CODEC_ID_MP3;
		/* intentional fall-through: audio-only types disable video below */
	case FFMPEG_WAV:
		fmt->video_codec = CODEC_ID_NONE;
		break;
	case FFMPEG_MPEG4:
	default:
		fmt->video_codec = CODEC_ID_MPEG4;
		break;
	}

	/* DV has hard geometry constraints in ffmpeg's encoder. */
	if (fmt->video_codec == CODEC_ID_DVVIDEO) {
		if (rectx != 720) {
			BKE_report(reports, RPT_ERROR, "Render width has to be 720 pixels for DV!");
			return 0;
		}
		if (rd->frs_sec != 25 && recty != 480) {
			BKE_report(reports, RPT_ERROR, "Render height has to be 480 pixels for DV-NTSC!");
			return 0;
		}
		if (rd->frs_sec == 25 && recty != 576) {
			BKE_report(reports, RPT_ERROR, "Render height has to be 576 pixels for DV-PAL!");
			return 0;
		}
	}

	if (ffmpeg_type == FFMPEG_DV) {
		fmt->audio_codec = CODEC_ID_PCM_S16LE;
		if (ffmpeg_audio_codec != CODEC_ID_NONE && rd->ffcodecdata.audio_mixrate != 48000 && rd->ffcodecdata.audio_channels != 2) {
			BKE_report(reports, RPT_ERROR, "FFMPEG only supports 48khz / stereo audio for DV!");
			return 0;
		}
	}

	if (fmt->video_codec != CODEC_ID_NONE) {
		video_stream = alloc_video_stream(rd, fmt->video_codec, of, rectx, recty);
		printf("alloc video stream %p\n", video_stream);
		if (!video_stream) {
			BKE_report(reports, RPT_ERROR, "Error initializing video stream.");
			return 0;
		}
	}

	if (ffmpeg_audio_codec != CODEC_ID_NONE) {
		audio_stream = alloc_audio_stream(rd, fmt->audio_codec, of);
		if (!audio_stream) {
			BKE_report(reports, RPT_ERROR, "Error initializing audio stream.");
			return 0;
		}
	}
	if (av_set_parameters(of, NULL) < 0) {
		BKE_report(reports, RPT_ERROR, "Error setting output parameters.");
		return 0;
	}
	if (!(fmt->flags & AVFMT_NOFILE)) {
		if (avio_open(&of->pb, name, AVIO_FLAG_WRITE) < 0) {
			BKE_report(reports, RPT_ERROR, "Could not open file for writing.");
			return 0;
		}
	}

	if (av_write_header(of) < 0) {
		BKE_report(reports, RPT_ERROR, "Could not initialize streams. Probably unsupported codec combination.");
		return 0;
	}

	outfile = of;
	av_dump_format(of, 0, name, 1);

	return 1;
}
/********************************************************************** * avformatInit ********************************************************************** * Allocates hb_mux_data_t structures, create file and write headers *********************************************************************/ static int avformatInit( hb_mux_object_t * m ) { hb_job_t * job = m->job; hb_audio_t * audio; hb_mux_data_t * track; int meta_mux; int max_tracks; int ii, ret; const char *muxer_name = NULL; uint8_t default_track_flag = 1; uint8_t need_fonts = 0; char *lang; max_tracks = 1 + hb_list_count( job->list_audio ) + hb_list_count( job->list_subtitle ); m->tracks = calloc(max_tracks, sizeof(hb_mux_data_t*)); m->oc = avformat_alloc_context(); if (m->oc == NULL) { hb_error( "Could not initialize avformat context." ); goto error; } AVDictionary * av_opts = NULL; switch (job->mux) { case HB_MUX_AV_MP4: m->time_base.num = 1; m->time_base.den = 90000; if( job->ipod_atom ) muxer_name = "ipod"; else muxer_name = "mp4"; meta_mux = META_MUX_MP4; av_dict_set(&av_opts, "brand", "mp42", 0); if (job->mp4_optimize) av_dict_set(&av_opts, "movflags", "faststart+disable_chpl", 0); else av_dict_set(&av_opts, "movflags", "+disable_chpl", 0); break; case HB_MUX_AV_MKV: // libavformat is essentially hard coded such that it only // works with a timebase of 1/1000 m->time_base.num = 1; m->time_base.den = 1000; muxer_name = "matroska"; meta_mux = META_MUX_MKV; break; default: { hb_error("Invalid Mux %x", job->mux); goto error; } } m->oc->oformat = av_guess_format(muxer_name, NULL, NULL); if(m->oc->oformat == NULL) { hb_error("Could not guess output format %s", muxer_name); goto error; } av_strlcpy(m->oc->filename, job->file, sizeof(m->oc->filename)); ret = avio_open2(&m->oc->pb, job->file, AVIO_FLAG_WRITE, &m->oc->interrupt_callback, NULL); if( ret < 0 ) { hb_error( "avio_open2 failed, errno %d", ret); goto error; } /* Video track */ track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) ); 
job->mux_data = track; track->type = MUX_TYPE_VIDEO; track->prev_chapter_tc = AV_NOPTS_VALUE; track->st = avformat_new_stream(m->oc, NULL); if (track->st == NULL) { hb_error("Could not initialize video stream"); goto error; } track->st->time_base = m->time_base; avcodec_get_context_defaults3(track->st->codec, NULL); track->st->codec->codec_type = AVMEDIA_TYPE_VIDEO; track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; uint8_t *priv_data = NULL; int priv_size = 0; switch (job->vcodec) { case HB_VCODEC_X264: case HB_VCODEC_QSV_H264: track->st->codec->codec_id = AV_CODEC_ID_H264; /* Taken from x264 muxers.c */ priv_size = 5 + 1 + 2 + job->config.h264.sps_length + 1 + 2 + job->config.h264.pps_length; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("H.264 extradata: malloc failure"); goto error; } priv_data[0] = 1; priv_data[1] = job->config.h264.sps[1]; /* AVCProfileIndication */ priv_data[2] = job->config.h264.sps[2]; /* profile_compat */ priv_data[3] = job->config.h264.sps[3]; /* AVCLevelIndication */ priv_data[4] = 0xff; // nalu size length is four bytes priv_data[5] = 0xe1; // one sps priv_data[6] = job->config.h264.sps_length >> 8; priv_data[7] = job->config.h264.sps_length; memcpy(priv_data+8, job->config.h264.sps, job->config.h264.sps_length); priv_data[8+job->config.h264.sps_length] = 1; // one pps priv_data[9+job->config.h264.sps_length] = job->config.h264.pps_length >> 8; priv_data[10+job->config.h264.sps_length] = job->config.h264.pps_length; memcpy(priv_data+11+job->config.h264.sps_length, job->config.h264.pps, job->config.h264.pps_length ); break; case HB_VCODEC_FFMPEG_MPEG4: track->st->codec->codec_id = AV_CODEC_ID_MPEG4; if (job->config.mpeg4.length != 0) { priv_size = job->config.mpeg4.length; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("MPEG4 extradata: malloc failure"); goto error; } memcpy(priv_data, job->config.mpeg4.bytes, priv_size); } break; case HB_VCODEC_FFMPEG_MPEG2: track->st->codec->codec_id = 
AV_CODEC_ID_MPEG2VIDEO; if (job->config.mpeg4.length != 0) { priv_size = job->config.mpeg4.length; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("MPEG2 extradata: malloc failure"); goto error; } memcpy(priv_data, job->config.mpeg4.bytes, priv_size); } break; case HB_VCODEC_FFMPEG_VP8: track->st->codec->codec_id = AV_CODEC_ID_VP8; priv_data = NULL; priv_size = 0; break; case HB_VCODEC_THEORA: { track->st->codec->codec_id = AV_CODEC_ID_THEORA; int size = 0; ogg_packet *ogg_headers[3]; for (ii = 0; ii < 3; ii++) { ogg_headers[ii] = (ogg_packet *)job->config.theora.headers[ii]; size += ogg_headers[ii]->bytes + 2; } priv_size = size; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("Theora extradata: malloc failure"); goto error; } size = 0; for(ii = 0; ii < 3; ii++) { AV_WB16(priv_data + size, ogg_headers[ii]->bytes); size += 2; memcpy(priv_data+size, ogg_headers[ii]->packet, ogg_headers[ii]->bytes); size += ogg_headers[ii]->bytes; } } break; case HB_VCODEC_X265: track->st->codec->codec_id = AV_CODEC_ID_HEVC; if (job->config.h265.headers_length > 0) { priv_size = job->config.h265.headers_length; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("H.265 extradata: malloc failure"); goto error; } memcpy(priv_data, job->config.h265.headers, priv_size); } break; default: hb_error("muxavformat: Unknown video codec: %x", job->vcodec); goto error; } track->st->codec->extradata = priv_data; track->st->codec->extradata_size = priv_size; track->st->sample_aspect_ratio.num = job->par.num; track->st->sample_aspect_ratio.den = job->par.den; track->st->codec->sample_aspect_ratio.num = job->par.num; track->st->codec->sample_aspect_ratio.den = job->par.den; track->st->codec->width = job->width; track->st->codec->height = job->height; track->st->disposition |= AV_DISPOSITION_DEFAULT; hb_rational_t vrate; if( job->pass_id == HB_PASS_ENCODE_2ND ) { hb_interjob_t * interjob = hb_interjob_get( job->h ); vrate = interjob->vrate; } 
else { vrate = job->vrate; } // If the vrate is 27000000, there's a good chance this is // a standard rate that we have in our hb_video_rates table. // Because of rounding errors and approximations made while // measuring framerate, the actual value may not be exact. So // we look for rates that are "close" and make an adjustment // to fps.den. if (vrate.num == 27000000) { const hb_rate_t *video_framerate = NULL; while ((video_framerate = hb_video_framerate_get_next(video_framerate)) != NULL) { if (abs(vrate.den - video_framerate->rate) < 10) { vrate.den = video_framerate->rate; break; } } } hb_reduce(&vrate.num, &vrate.den, vrate.num, vrate.den); if (job->mux == HB_MUX_AV_MP4) { // libavformat mp4 muxer requires that the codec time_base have the // same denominator as the stream time_base, it uses it for the // mdhd timescale. double scale = (double)track->st->time_base.den / vrate.num; track->st->codec->time_base.den = track->st->time_base.den; track->st->codec->time_base.num = vrate.den * scale; } else { track->st->codec->time_base.num = vrate.den; track->st->codec->time_base.den = vrate.num; } track->st->avg_frame_rate.num = vrate.num; track->st->avg_frame_rate.den = vrate.den; /* add the audio tracks */ for(ii = 0; ii < hb_list_count( job->list_audio ); ii++ ) { audio = hb_list_item( job->list_audio, ii ); track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) ); audio->priv.mux_data = track; track->type = MUX_TYPE_AUDIO; track->st = avformat_new_stream(m->oc, NULL); if (track->st == NULL) { hb_error("Could not initialize audio stream"); goto error; } avcodec_get_context_defaults3(track->st->codec, NULL); track->st->codec->codec_type = AVMEDIA_TYPE_AUDIO; track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; if (job->mux == HB_MUX_AV_MP4) { track->st->codec->time_base.num = audio->config.out.samples_per_frame; track->st->codec->time_base.den = audio->config.out.samplerate; track->st->time_base.num = 1; track->st->time_base.den = 
audio->config.out.samplerate; } else { track->st->codec->time_base = m->time_base; track->st->time_base = m->time_base; } priv_data = NULL; priv_size = 0; switch (audio->config.out.codec & HB_ACODEC_MASK) { case HB_ACODEC_DCA: case HB_ACODEC_DCA_HD: track->st->codec->codec_id = AV_CODEC_ID_DTS; break; case HB_ACODEC_AC3: track->st->codec->codec_id = AV_CODEC_ID_AC3; break; case HB_ACODEC_FFEAC3: track->st->codec->codec_id = AV_CODEC_ID_EAC3; break; case HB_ACODEC_FFTRUEHD: track->st->codec->codec_id = AV_CODEC_ID_TRUEHD; break; case HB_ACODEC_LAME: case HB_ACODEC_MP3: track->st->codec->codec_id = AV_CODEC_ID_MP3; break; case HB_ACODEC_VORBIS: { track->st->codec->codec_id = AV_CODEC_ID_VORBIS; int jj, size = 0; ogg_packet *ogg_headers[3]; for (jj = 0; jj < 3; jj++) { ogg_headers[jj] = (ogg_packet *)audio->priv.config.vorbis.headers[jj]; size += ogg_headers[jj]->bytes + 2; } priv_size = size; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("Vorbis extradata: malloc failure"); goto error; } size = 0; for(jj = 0; jj < 3; jj++) { AV_WB16(priv_data + size, ogg_headers[jj]->bytes); size += 2; memcpy(priv_data+size, ogg_headers[jj]->packet, ogg_headers[jj]->bytes); size += ogg_headers[jj]->bytes; } } break; case HB_ACODEC_FFFLAC: case HB_ACODEC_FFFLAC24: track->st->codec->codec_id = AV_CODEC_ID_FLAC; if (audio->priv.config.extradata.length) { priv_size = audio->priv.config.extradata.length; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("FLAC extradata: malloc failure"); goto error; } memcpy(priv_data, audio->priv.config.extradata.bytes, audio->priv.config.extradata.length); } break; case HB_ACODEC_FFAAC: case HB_ACODEC_CA_AAC: case HB_ACODEC_CA_HAAC: case HB_ACODEC_FDK_AAC: case HB_ACODEC_FDK_HAAC: track->st->codec->codec_id = AV_CODEC_ID_AAC; // libav mkv muxer expects there to be extradata for // AAC and will crash if it is NULL. So allocate extra // byte so that av_malloc does not return NULL when length // is 0. 
priv_size = audio->priv.config.extradata.length; priv_data = av_malloc(priv_size + 1); if (priv_data == NULL) { hb_error("AAC extradata: malloc failure"); goto error; } memcpy(priv_data, audio->priv.config.extradata.bytes, audio->priv.config.extradata.length); // AAC from pass-through source may be ADTS. // Therefore inserting "aac_adtstoasc" bitstream filter is // preferred. // The filter does nothing for non-ADTS bitstream. if (audio->config.out.codec == HB_ACODEC_AAC_PASS) { track->bitstream_filter = av_bitstream_filter_init("aac_adtstoasc"); } break; default: hb_error("muxavformat: Unknown audio codec: %x", audio->config.out.codec); goto error; } track->st->codec->extradata = priv_data; track->st->codec->extradata_size = priv_size; if( default_track_flag ) { track->st->disposition |= AV_DISPOSITION_DEFAULT; default_track_flag = 0; } lang = lookup_lang_code(job->mux, audio->config.lang.iso639_2 ); if (lang != NULL) { av_dict_set(&track->st->metadata, "language", lang, 0); } track->st->codec->sample_rate = audio->config.out.samplerate; if (audio->config.out.codec & HB_ACODEC_PASS_FLAG) { track->st->codec->channels = av_get_channel_layout_nb_channels(audio->config.in.channel_layout); track->st->codec->channel_layout = audio->config.in.channel_layout; } else { track->st->codec->channels = hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown); track->st->codec->channel_layout = hb_ff_mixdown_xlat(audio->config.out.mixdown, NULL); } char *name; if (audio->config.out.name == NULL) { switch (track->st->codec->channels) { case 1: name = "Mono"; break; case 2: name = "Stereo"; break; default: name = "Surround"; break; } } else { name = audio->config.out.name; } // Set audio track title av_dict_set(&track->st->metadata, "title", name, 0); if (job->mux == HB_MUX_AV_MP4) { // Some software (MPC, mediainfo) use hdlr description // for track title av_dict_set(&track->st->metadata, "handler", name, 0); } } char * subidx_fmt = "size: %dx%d\n" "org: %d, %d\n" "scale: 
100%%, 100%%\n" "alpha: 100%%\n" "smooth: OFF\n" "fadein/out: 50, 50\n" "align: OFF at LEFT TOP\n" "time offset: 0\n" "forced subs: %s\n" "palette: %06x, %06x, %06x, %06x, %06x, %06x, " "%06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x\n" "custom colors: OFF, tridx: 0000, " "colors: 000000, 000000, 000000, 000000\n"; int subtitle_default = -1; for( ii = 0; ii < hb_list_count( job->list_subtitle ); ii++ ) { hb_subtitle_t *subtitle = hb_list_item( job->list_subtitle, ii ); if( subtitle->config.dest == PASSTHRUSUB ) { if ( subtitle->config.default_track ) subtitle_default = ii; } } // Quicktime requires that at least one subtitle is enabled, // else it doesn't show any of the subtitles. // So check to see if any of the subtitles are flagged to be // the defualt. The default will the the enabled track, else // enable the first track. if (job->mux == HB_MUX_AV_MP4 && subtitle_default == -1) { subtitle_default = 0; } for( ii = 0; ii < hb_list_count( job->list_subtitle ); ii++ ) { hb_subtitle_t * subtitle; uint32_t rgb[16]; char subidx[2048]; int len; subtitle = hb_list_item( job->list_subtitle, ii ); if (subtitle->config.dest != PASSTHRUSUB) continue; track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) ); subtitle->mux_data = track; track->type = MUX_TYPE_SUBTITLE; track->st = avformat_new_stream(m->oc, NULL); if (track->st == NULL) { hb_error("Could not initialize subtitle stream"); goto error; } avcodec_get_context_defaults3(track->st->codec, NULL); track->st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE; track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; track->st->time_base = m->time_base; track->st->codec->time_base = m->time_base; track->st->codec->width = subtitle->width; track->st->codec->height = subtitle->height; priv_data = NULL; priv_size = 0; switch (subtitle->source) { case VOBSUB: { int jj; track->st->codec->codec_id = AV_CODEC_ID_DVD_SUBTITLE; for (jj = 0; jj < 16; jj++) rgb[jj] = hb_yuv2rgb(subtitle->palette[jj]); len = 
snprintf(subidx, 2048, subidx_fmt, subtitle->width, subtitle->height, 0, 0, "OFF", rgb[0], rgb[1], rgb[2], rgb[3], rgb[4], rgb[5], rgb[6], rgb[7], rgb[8], rgb[9], rgb[10], rgb[11], rgb[12], rgb[13], rgb[14], rgb[15]); priv_size = len + 1; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("VOBSUB extradata: malloc failure"); goto error; } memcpy(priv_data, subidx, priv_size); } break; case PGSSUB: { track->st->codec->codec_id = AV_CODEC_ID_HDMV_PGS_SUBTITLE; } break; case CC608SUB: case CC708SUB: case TX3GSUB: case SRTSUB: case UTF8SUB: case SSASUB: { if (job->mux == HB_MUX_AV_MP4) { track->st->codec->codec_id = AV_CODEC_ID_MOV_TEXT; } else { track->st->codec->codec_id = AV_CODEC_ID_SSA; need_fonts = 1; if (subtitle->extradata_size) { priv_size = subtitle->extradata_size; priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("SSA extradata: malloc failure"); goto error; } memcpy(priv_data, subtitle->extradata, priv_size); } } } break; default: continue; } if (track->st->codec->codec_id == AV_CODEC_ID_MOV_TEXT) { // Build codec extradata for tx3g. // If we were using a libav codec to generate this data // this would (or should) be done for us. uint8_t properties[] = { 0x00, 0x00, 0x00, 0x00, // Display Flags 0x01, // Horiz. Justification 0xff, // Vert. 
Justification 0x00, 0x00, 0x00, 0xff, // Bg color 0x00, 0x00, 0x00, 0x00, // Default text box 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Reserved 0x00, 0x01, // Font ID 0x00, // Font face 0x18, // Font size 0xff, 0xff, 0xff, 0xff, // Fg color // Font table: 0x00, 0x00, 0x00, 0x12, // Font table size 'f','t','a','b', // Tag 0x00, 0x01, // Count 0x00, 0x01, // Font ID 0x05, // Font name length 'A','r','i','a','l' // Font name }; int width, height = 60; width = job->width * job->par.num / job->par.den; track->st->codec->width = width; track->st->codec->height = height; properties[14] = height >> 8; properties[15] = height & 0xff; properties[16] = width >> 8; properties[17] = width & 0xff; priv_size = sizeof(properties); priv_data = av_malloc(priv_size); if (priv_data == NULL) { hb_error("TX3G extradata: malloc failure"); goto error; } memcpy(priv_data, properties, priv_size); } track->st->codec->extradata = priv_data; track->st->codec->extradata_size = priv_size; if (ii == subtitle_default) { track->st->disposition |= AV_DISPOSITION_DEFAULT; } if (subtitle->config.default_track) { track->st->disposition |= AV_DISPOSITION_FORCED; } lang = lookup_lang_code(job->mux, subtitle->iso639_2 ); if (lang != NULL) { av_dict_set(&track->st->metadata, "language", lang, 0); } }
static int rtp_new_av_stream(RTSPContext *ctx, struct sockaddr_in *sin, int streamid, enum CodecID codecid) { AVOutputFormat *fmt = NULL; AVFormatContext *fmtctx = NULL; AVStream *stream = NULL; AVCodecContext *encoder = NULL; uint8_t *dummybuf = NULL; // if(streamid > IMAGE_SOURCE_CHANNEL_MAX) { ga_error("invalid stream index (%d > %d)\n", streamid, IMAGE_SOURCE_CHANNEL_MAX); return -1; } if(codecid != rtspconf->video_encoder_codec->id && codecid != rtspconf->audio_encoder_codec->id) { ga_error("invalid codec (%d)\n", codecid); return -1; } if(ctx->fmtctx[streamid] != NULL) { ga_error("duplicated setup to an existing stream (%d)\n", streamid); return -1; } if((fmt = av_guess_format("rtp", NULL, NULL)) == NULL) { ga_error("RTP not supported.\n"); return -1; } if((fmtctx = avformat_alloc_context()) == NULL) { ga_error("create avformat context failed.\n"); return -1; } fmtctx->oformat = fmt; if(ctx->lower_transport[streamid] == RTSP_LOWER_TRANSPORT_UDP) { snprintf(fmtctx->filename, sizeof(fmtctx->filename), "rtp://%s:%d", inet_ntoa(sin->sin_addr), ntohs(sin->sin_port)); if(avio_open(&fmtctx->pb, fmtctx->filename, AVIO_FLAG_WRITE) < 0) { ga_error("cannot open URL: %s\n", fmtctx->filename); return -1; } ga_error("RTP/UDP: URL opened [%d]: %s, max_packet_size=%d\n", streamid, fmtctx->filename, fmtctx->pb->max_packet_size); } else if(ctx->lower_transport[streamid] == RTSP_LOWER_TRANSPORT_TCP) { // XXX: should we use avio_open_dyn_buf(&fmtctx->pb)? if(ffio_open_dyn_packet_buf(&fmtctx->pb, RTSP_TCP_MAX_PACKET_SIZE) < 0) { ga_error("cannot open dynamic packet buffer\n"); return -1; } ga_error("RTP/TCP: Dynamic buffer opened, max_packet_size=%d.\n", fmtctx->pb->max_packet_size); } fmtctx->pb->seekable = 0; // if((stream = ga_avformat_new_stream(fmtctx, 0, codecid == rtspconf->video_encoder_codec->id ? 
rtspconf->video_encoder_codec : rtspconf->audio_encoder_codec)) == NULL) { ga_error("Cannot create new stream (%d)\n", codecid); return -1; } //#ifndef SHARE_ENCODER if(codecid == rtspconf->video_encoder_codec->id) { encoder = ga_avcodec_vencoder_init( stream->codec, rtspconf->video_encoder_codec, video_source_width(streamid), video_source_height(streamid), rtspconf->video_fps, rtspconf->vso); } else if(codecid == rtspconf->audio_encoder_codec->id) { encoder = ga_avcodec_aencoder_init( stream->codec, rtspconf->audio_encoder_codec, rtspconf->audio_bitrate, rtspconf->audio_samplerate, rtspconf->audio_channels, rtspconf->audio_codec_format, rtspconf->audio_codec_channel_layout); } if(encoder == NULL) { ga_error("Cannot init encoder\n"); return -1; } //#endif /* SHARE_ENCODER */ // ctx->encoder[streamid] = encoder; ctx->stream[streamid] = stream; ctx->fmtctx[streamid] = fmtctx; // write header if(avformat_write_header(ctx->fmtctx[streamid], NULL) < 0) { ga_error("Cannot write stream id %d.\n", streamid); return -1; } if(ctx->lower_transport[streamid] == RTSP_LOWER_TRANSPORT_TCP) { int rlen; rlen = avio_close_dyn_buf(ctx->fmtctx[streamid]->pb, &dummybuf); av_free(dummybuf); } // return 0; }
ExportFFmpeg::ExportFFmpeg()
:  ExportPlugin()
{
   // Start with no libavformat state; Init() fills these in per export.
   mEncFormatDesc = NULL;      // output container description for libavformat
   mEncAudioStream = NULL;     // audio stream in the output file (may stay NULL)
#define MAX_AUDIO_PACKET_SIZE (128 * 1024)
   mEncAudioFifoOutBufSiz = 0;

   mSampleRate = 0;
   mSupportsUTF8 = true;

   // Load the FFmpeg libraries; the matching DropFFmpegLibs() call lives in
   // the ExportFFmpeg destructor.
   PickFFmpegLibs();
   const int libVersion = FFmpegLibsInst->ValidLibsLoaded() ? avformat_version() : 0;

   // Register every entry of the export-type table as a selectable format.
   for (int fmt = 0; fmt < FMT_LAST; fmt++)
   {
      wxString muxerName(ExportFFmpegOptions::fmts[fmt].shortname);
      // FMT_OTHER is always kept, and nothing is hidden when the av-libs are
      // missing; otherwise an entry is shown only if both its muxer and its
      // encoder were compiled into this FFmpeg build.
      if (fmt < FMT_OTHER && FFmpegLibsInst->ValidLibsLoaded())
      {
         AVOutputFormat *muxer = av_guess_format(muxerName.mb_str(), NULL, NULL);
         AVCodec *enc = avcodec_find_encoder(ExportFFmpegOptions::fmts[fmt].codecid);
         if (muxer == NULL || enc == NULL)
         {
            ExportFFmpegOptions::fmts[fmt].compiledIn = false;
            continue;
         }
      }

      const int index = AddFormat() - 1;
      SetFormat(ExportFFmpegOptions::fmts[fmt].name, index);
      AddExtension(ExportFFmpegOptions::fmts[fmt].extension, index);

      // A few formats accept additional file extensions.
      if (fmt == FMT_M4A)
      {
         AddExtension(wxString(wxT("3gp")), index);
         AddExtension(wxString(wxT("m4r")), index);
         AddExtension(wxString(wxT("mp4")), index);
      }
      else if (fmt == FMT_WMA2)
      {
         AddExtension(wxString(wxT("asf")), index);
         AddExtension(wxString(wxT("wmv")), index);
      }

      SetMaxChannels(ExportFFmpegOptions::fmts[fmt].maxchannels, index);
      SetDescription(ExportFFmpegOptions::fmts[fmt].description, index);

      // Metadata is available when the table says "always" (-1,-1,-1) or when
      // the loaded libavformat is at least the required version.
      const int canmeta = ExportFFmpegOptions::fmts[fmt].canmetadata;
      if (canmeta && (canmeta == AV_VERSION_INT(-1,-1,-1) || canmeta <= libVersion))
      {
         SetCanMetaData(true, index);
      }
      else
      {
         SetCanMetaData(false, index);
      }
   }
}
bool ExportFFmpeg::Init(const char *shortname, AudacityProject *project, Tags *metadata, int subformat) { int err; //FFmpegLibsInst->LoadLibs(NULL,true); //Loaded at startup or from Prefs now if (!FFmpegLibsInst->ValidLibsLoaded()) return false; av_log_set_callback(av_log_wx_callback); // See if libavformat has modules that can write our output format. If so, mEncFormatDesc // will describe the functions used to write the format (used internally by libavformat) // and the default video/audio codecs that the format uses. if ((mEncFormatDesc = av_guess_format(shortname, OSINPUT(mName), NULL)) == NULL) { wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Can't determine format description for file \"%s\"."), mName.c_str()), _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION); return false; } // mEncFormatCtx is used by libavformat to carry around context data re our output file. if ((mEncFormatCtx = avformat_alloc_context()) == NULL) { wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Can't allocate output format context.")), _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION); return false; } // Initialise the output format context. mEncFormatCtx->oformat = mEncFormatDesc; memcpy(mEncFormatCtx->filename, OSINPUT(mName), strlen(OSINPUT(mName))+1); // At the moment Audacity can export only one audio stream if ((mEncAudioStream = avformat_new_stream(mEncFormatCtx, NULL)) == NULL) { wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Can't add audio stream to output file \"%s\"."), mName.c_str()), _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION); return false; } mEncAudioStream->id = 0; // Open the output file. if (!(mEncFormatDesc->flags & AVFMT_NOFILE)) { if ((err = ufile_fopen(&mEncFormatCtx->pb, mName, AVIO_FLAG_WRITE)) < 0) { wxMessageBox(wxString::Format(wxT("FFmpeg : ERROR - Can't open output file \"%s\" to write. 
Error code is %d."), mName.c_str(), err), _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION); return false; } } // Open the audio stream's codec and initialise any stream related data. if (!InitCodecs(project)) return false; if (metadata == NULL) metadata = project->GetTags(); // Add metadata BEFORE writing the header. // At the moment that works with ffmpeg-git and ffmpeg-0.5 for MP4. if (GetCanMetaData(subformat)) { mSupportsUTF8 = ExportFFmpegOptions::fmts[mSubFormat].canutf8; AddTags(metadata); } // Write headers to the output file. if ((err = avformat_write_header(mEncFormatCtx, NULL)) < 0) { wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Can't write headers to output file \"%s\". Error code is %d."), mName.c_str(),err), _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION); return false; } return true; }