int openAVDumping(void* window, bool video_opengl, char* dumpfile, int sf) { if (tasflags.framerate <= 0) { debuglog(LCF_DUMP | LCF_ERROR, "Not supporting non deterministic timer"); return 1; } start_frame = sf; accum_samples = 0; int width, height; AVPixelFormat pixfmt = initVideoCapture(window, video_opengl, &width, &height); if (pixfmt == AV_PIX_FMT_NONE) { debuglog(LCF_DUMP | LCF_ERROR, "Unable to initialize video capture"); return 1; } /* Initialize AVCodec and AVFormat libraries */ av_register_all(); /* Initialize AVOutputFormat */ outputFormat = av_guess_format(NULL, dumpfile, NULL); if (!outputFormat) { debuglog(LCF_DUMP | LCF_ERROR, "Could not find suitable output format for file ", dumpfile); return 1; } /* Initialize AVFormatContext */ formatContext = avformat_alloc_context(); if (!formatContext) { debuglog(LCF_DUMP | LCF_ERROR, "Could not initialize AVFormatContext"); return 1; } formatContext->oformat = outputFormat; /*** Create video stream ***/ /* Initialize video AVCodec */ AVCodec *video_codec = NULL; AVCodecID codec_id = AV_CODEC_ID_MPEG4; //int codec_id = AV_CODEC_ID_H264; video_codec = avcodec_find_encoder(codec_id); if (!video_codec) { debuglog(LCF_DUMP | LCF_ERROR, "Video codec not found"); return 1; } outputFormat->video_codec = codec_id; /* Initialize video stream */ video_st = avformat_new_stream(formatContext, video_codec); if (!video_st) { debuglog(LCF_DUMP | LCF_ERROR, "Could not initialize video AVStream"); return 1; } /* Fill video stream parameters */ video_st->id = formatContext->nb_streams - 1; video_st->codec->codec_type = AVMEDIA_TYPE_VIDEO; video_st->codec->codec_id = codec_id; video_st->codec->bit_rate = 400000; video_st->codec->width = width; video_st->codec->height = height; video_st->time_base = (AVRational){1,static_cast<int>(tasflags.framerate)}; video_st->codec->time_base = (AVRational){1,static_cast<int>(tasflags.framerate)}; video_st->codec->gop_size = 10; /* emit one intra frame every ten frames */ 
video_st->codec->max_b_frames = 1; video_st->codec->pix_fmt = AV_PIX_FMT_YUV420P; /* Some formats want stream headers to be separate. */ if (formatContext->oformat->flags & AVFMT_GLOBALHEADER) video_st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; /* Use a preset for h264 */ if (codec_id == AV_CODEC_ID_H264) av_opt_set(video_st->codec->priv_data, "preset", "slow", 0); /* Open the codec */ if (avcodec_open2(video_st->codec, video_codec, NULL) < 0) { debuglog(LCF_DUMP | LCF_ERROR, "Could not open video codec"); return 1; } /*** Create audio stream ***/ /* Initialize audio AVCodec */ AVCodec *audio_codec = NULL; AVCodecID audio_codec_id = AV_CODEC_ID_PCM_S16LE; //AVCodecID audio_codec_id = AV_CODEC_ID_VORBIS; audio_codec = avcodec_find_encoder(audio_codec_id); if (!audio_codec) { debuglog(LCF_DUMP | LCF_ERROR, "Audio codec not found"); return 1; } /* Initialize audio stream */ audio_st = avformat_new_stream(formatContext, audio_codec); if (!audio_st) { debuglog(LCF_DUMP | LCF_ERROR, "Could not initialize video AVStream"); return 1; } /* Fill audio stream parameters */ audio_st->id = formatContext->nb_streams - 1; audio_st->codec->codec_type = AVMEDIA_TYPE_AUDIO; if (audiocontext.outBitDepth == 8) audio_st->codec->sample_fmt = AV_SAMPLE_FMT_U8; else if (audiocontext.outBitDepth == 16) audio_st->codec->sample_fmt = AV_SAMPLE_FMT_S16; else { debuglog(LCF_DUMP | LCF_ERROR, "Unknown audio format"); return 1; } audio_st->codec->bit_rate = 64000; audio_st->codec->sample_rate = audiocontext.outFrequency; audio_st->codec->channels = audiocontext.outNbChannels; /* Some formats want stream headers to be separate. 
*/ if (formatContext->oformat->flags & AVFMT_GLOBALHEADER) audio_st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; /* Open the codec */ if (avcodec_open2(audio_st->codec, audio_codec, NULL) < 0) { debuglog(LCF_DUMP | LCF_ERROR, "Could not open audio codec"); return 1; } /* Initialize video AVFrame */ video_frame = av_frame_alloc(); if (!video_frame) { debuglog(LCF_DUMP | LCF_ERROR, "Could not allocate AVFrame"); return 1; } video_frame->format = video_st->codec->pix_fmt; video_frame->width = video_st->codec->width; video_frame->height = video_st->codec->height; /* Initialize audio AVFrame */ audio_frame = av_frame_alloc(); /* Allocate the image buffer inside the AVFrame */ int ret = av_image_alloc(video_frame->data, video_frame->linesize, video_st->codec->width, video_st->codec->height, video_st->codec->pix_fmt, 32); if (ret < 0) { debuglog(LCF_DUMP | LCF_ERROR, "Could not allocate raw picture buffer"); return 1; } /* Initialize swscale context for pixel format conversion */ toYUVctx = sws_getContext(video_frame->width, video_frame->height, pixfmt, video_frame->width, video_frame->height, AV_PIX_FMT_YUV420P, SWS_LANCZOS | SWS_ACCURATE_RND, NULL,NULL,NULL); if (toYUVctx == NULL) { debuglog(LCF_DUMP | LCF_ERROR, "Could not allocate swscale context"); return 1; } /* Print informations on input and output streams */ threadState.setOwnCode(true); // We protect the following code because it performs IO that we hook av_dump_format(formatContext, 0, dumpfile, 1); /* Set up output file */ if (avio_open(&formatContext->pb, dumpfile, AVIO_FLAG_WRITE) < 0) { threadState.setOwnCode(false); debuglog(LCF_DUMP | LCF_ERROR, "Could not open video file"); return 1; } /* Write header */ if (avformat_write_header(formatContext, NULL) < 0) { threadState.setOwnCode(false); debuglog(LCF_DUMP | LCF_ERROR, "Could not write header"); return 1; } threadState.setOwnCode(false); return 0; }
/*
 * Entry point of the DeckLink capture tool: parses command-line options,
 * selects and configures a DeckLink card (audio/video connections, display
 * mode, pixel format), builds an FFmpeg muxing context with video/audio
 * (and optionally data) streams, starts capture, and blocks until a signal
 * wakes the sleep condition.  All error paths funnel through `bail`, which
 * releases DeckLink interfaces and finalizes the output file if one exists.
 */
int main(int argc, char *argv[])
{
    IDeckLinkIterator *deckLinkIterator = CreateDeckLinkIteratorInstance();
    DeckLinkCaptureDelegate *delegate;
    BMDDisplayMode selectedDisplayMode = bmdModeNTSC;
    int displayModeCount = 0;
    int exitStatus = 1;                  /* pessimistic default; cleared once streams start */
    int aconnection = 0, vconnection = 0, camera = 0, i = 0;
    int ch;
    AVDictionary *opts = NULL;
    BMDPixelFormat pix = bmdFormat8BitYUV;
    HRESULT result;
    pthread_t th;

    pthread_mutex_init(&sleepMutex, NULL);
    pthread_cond_init(&sleepCond, NULL);
    av_register_all();

    if (!deckLinkIterator) {
        fprintf(stderr, "This application requires the DeckLink drivers installed.\n");
        goto bail;
    }

    // Parse command line options
    while ((ch = getopt(argc, argv, "?hvc:s:f:a:m:n:p:M:F:C:A:V:o:w:S:d:")) != -1) {
        switch (ch) {
        case 'v':
            g_verbose = true;
            break;
        case 'm':
            g_videoModeIndex = atoi(optarg);
            break;
        case 'c':
            g_audioChannels = atoi(optarg);
            if (g_audioChannels != 2 && g_audioChannels != 8 && g_audioChannels != 16) {
                fprintf(stderr, "Invalid argument: Audio Channels must be either 2, 8 or 16\n");
                goto bail;
            }
            break;
        case 's':
            g_audioSampleDepth = atoi(optarg);
            switch (g_audioSampleDepth) {
            case 16:
                sample_fmt = AV_SAMPLE_FMT_S16;
                break;
            case 32:
                sample_fmt = AV_SAMPLE_FMT_S32;
                break;
            default:
                fprintf(stderr, "Invalid argument:"
                        " Audio Sample Depth must be either 16 bits"
                        " or 32 bits\n");
                goto bail;
            }
            break;
        case 'p':
            /* Pixel format: numeric (8/10) or named (rgb8/rgb10/yuv8/yuv10). */
            switch (atoi(optarg)) {
            case 8:
                pix = bmdFormat8BitYUV;
                pix_fmt = AV_PIX_FMT_UYVY422;
                break;
            case 10:
                pix = bmdFormat10BitYUV;
                pix_fmt = AV_PIX_FMT_YUV422P10;
                break;
            default:
                /* atoi() returned 0 for a non-numeric arg; try the names. */
                if (!strcmp("rgb10", optarg)) {
                    pix = bmdFormat10BitRGB;
                    pix_fmt = AV_PIX_FMT_RGB48;
                    break;
                }
                if (!strcmp("yuv10", optarg)) {
                    pix = bmdFormat10BitYUV;
                    pix_fmt = AV_PIX_FMT_YUV422P10;
                    break;
                }
                if (!strcmp("yuv8", optarg)) {
                    pix = bmdFormat8BitYUV;
                    pix_fmt = AV_PIX_FMT_UYVY422;
                    break;
                }
                if (!strcmp("rgb8", optarg)) {
                    pix = bmdFormat8BitARGB;
                    pix_fmt = AV_PIX_FMT_ARGB;
                    break;
                }
                fprintf(stderr, "Invalid argument: Pixel Format Depth must be either 8 bits or 10 bits\n");
                goto bail;
            }
            break;
        case 'f':
            g_videoOutputFile = optarg;
            break;
        case 'n':
            g_maxFrames = atoi(optarg);
            break;
        case 'M':
            /* memory limit given in GiB */
            g_memoryLimit = atoi(optarg) * 1024 * 1024 * 1024L;
            break;
        case 'F':
            fmt = av_guess_format(optarg, NULL, NULL);
            break;
        case 'A':
            aconnection = atoi(optarg);
            break;
        case 'V':
            vconnection = atoi(optarg);
            break;
        case 'C':
            camera = atoi(optarg);
            break;
        case 'S':
            serial_fd = open(optarg, O_RDWR | O_NONBLOCK);
            break;
        case 'o':
            if (av_dict_parse_string(&opts, optarg, "=", ":", 0) < 0) {
                fprintf(stderr, "Cannot parse option string %s\n", optarg);
                goto bail;
            }
            break;
        case 'w':
            wallclock = true;
            break;
        case 'd':
            draw_bars = atoi(optarg);
            break;
        case '?':
        case 'h':
            usage(0);
        }
    }

    if (serial_fd > 0 && wallclock) {
        fprintf(stderr, "%s",
                "Wallclock and serial are not supported together\n"
                "Please disable either.\n");
        exit(1);
    }

    /* Connect to the first DeckLink instance */
    /* advances the iterator `camera`+1 times to select the requested card */
    do
        result = deckLinkIterator->Next(&deckLink);
    while (i++ < camera);
    if (result != S_OK) {
        fprintf(stderr, "No DeckLink PCI cards found.\n");
        goto bail;
    }

    if (deckLink->QueryInterface(IID_IDeckLinkInput, (void **)&deckLinkInput) != S_OK) {
        goto bail;
    }

    result = deckLink->QueryInterface(IID_IDeckLinkConfiguration, (void **)&deckLinkConfiguration);
    if (result != S_OK) {
        fprintf(stderr, "Could not obtain the IDeckLinkConfiguration interface - result = %08x\n", result);
        goto bail;
    }

    /* Audio connection selection (0 leaves the card's current setting). */
    result = S_OK;
    switch (aconnection) {
    case 1:
        result = DECKLINK_SET_AUDIO_CONNECTION(bmdAudioConnectionAnalog);
        break;
    case 2:
        result = DECKLINK_SET_AUDIO_CONNECTION(bmdAudioConnectionEmbedded);
        break;
    case 3:
        result = DECKLINK_SET_AUDIO_CONNECTION(bmdAudioConnectionAESEBU);
        break;
    default:
        // do not change it
        break;
    }
    if (result != S_OK) {
        fprintf(stderr, "Failed to set audio input - result = %08x\n", result);
        goto bail;
    }

    /* Video connection selection (0 leaves the card's current setting). */
    result = S_OK;
    switch (vconnection) {
    case 1:
        result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionComposite);
        break;
    case 2:
        result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionComponent);
        break;
    case 3:
        result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionHDMI);
        break;
    case 4:
        result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionSDI);
        break;
    case 5:
        result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionOpticalSDI);
        break;
    case 6:
        result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionSVideo);
        break;
    default:
        // do not change it
        break;
    }
    if (result != S_OK) {
        fprintf(stderr, "Failed to set video input - result %08x\n", result);
        goto bail;
    }

    /* NOTE(review): `delegate` is never released/deleted; it lives for the
     * process lifetime — presumably intentional for a capture tool, verify. */
    delegate = new DeckLinkCaptureDelegate();
    deckLinkInput->SetCallback(delegate);

    // Obtain an IDeckLinkDisplayModeIterator to enumerate the display modes supported on output
    result = deckLinkInput->GetDisplayModeIterator(&displayModeIterator);
    if (result != S_OK) {
        fprintf(stderr, "Could not obtain the video output display mode iterator - result = %08x\n", result);
        goto bail;
    }

    if (!g_videoOutputFile) {
        fprintf(stderr, "Missing argument: Please specify output path using -f\n");
        goto bail;
    }

    if (!fmt) {
        /* No -F given: guess the container from the output filename. */
        fmt = av_guess_format(NULL, g_videoOutputFile, NULL);
        if (!fmt) {
            fprintf(stderr, "Unable to guess output format, please specify explicitly using -F\n");
            goto bail;
        }
    }

    if (g_videoModeIndex < 0) {
        fprintf(stderr, "No video mode specified\n");
        usage(0);
    }

    /* Walk the display-mode list until the requested index is reached. */
    while (displayModeIterator->Next(&displayMode) == S_OK) {
        if (g_videoModeIndex == displayModeCount) {
            selectedDisplayMode = displayMode->GetDisplayMode();
            break;
        }
        displayModeCount++;
        displayMode->Release();
    }

    result = deckLinkInput->EnableVideoInput(selectedDisplayMode, pix, 0);
    if (result != S_OK) {
        fprintf(stderr, "Failed to enable video input. Is another application using "
                "the card?\n");
        goto bail;
    }

    result = deckLinkInput->EnableAudioInput(bmdAudioSampleRate48kHz, g_audioSampleDepth, g_audioChannels);
    if (result != S_OK) {
        fprintf(stderr, "Failed to enable audio input. Is another application using "
                "the card?\n");
        goto bail;
    }

    /* Build the FFmpeg muxing context: raw/uncompressed codecs matched to the
     * selected DeckLink pixel format and audio sample depth. */
    oc = avformat_alloc_context();
    oc->oformat = fmt;

    snprintf(oc->filename, sizeof(oc->filename), "%s", g_videoOutputFile);

    switch (pix) {
    case bmdFormat8BitARGB:
    case bmdFormat8BitYUV:
        fmt->video_codec = AV_CODEC_ID_RAWVIDEO;
        break;
    case bmdFormat10BitYUV:
        fmt->video_codec = AV_CODEC_ID_V210;
        break;
    case bmdFormat10BitRGB:
        fmt->video_codec = AV_CODEC_ID_R210;
        break;
    }

    fmt->audio_codec = (sample_fmt == AV_SAMPLE_FMT_S16 ? AV_CODEC_ID_PCM_S16LE : AV_CODEC_ID_PCM_S32LE);

    video_st = add_video_stream(oc, fmt->video_codec);
    audio_st = add_audio_stream(oc, fmt->audio_codec);
    /* Optional timecode/data stream when serial input or wallclock stamping is on. */
    if (serial_fd > 0 || wallclock)
        data_st = add_data_stream(oc, AV_CODEC_ID_TEXT);

    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, oc->filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", oc->filename);
            exit(1);
        }
    }

    avformat_write_header(oc, &opts);
    avpacket_queue_init(&queue);

    result = deckLinkInput->StartStreams();
    if (result != S_OK) {
        goto bail;
    }
    // All Okay.
    exitStatus = 0;

    /* Muxer thread: drains the packet queue into the output file. */
    if (pthread_create(&th, NULL, push_packet, oc))
        goto bail;

    // Block main thread until signal occurs
    pthread_mutex_lock(&sleepMutex);
    set_signal();
    pthread_cond_wait(&sleepCond, &sleepMutex);
    pthread_mutex_unlock(&sleepMutex);
    deckLinkInput->StopStreams();
    fprintf(stderr, "Stopping Capture\n");
    avpacket_queue_end(&queue);

bail:
    if (displayModeIterator != NULL) {
        displayModeIterator->Release();
        displayModeIterator = NULL;
    }

    if (deckLinkInput != NULL) {
        deckLinkInput->Release();
        deckLinkInput = NULL;
    }

    if (deckLink != NULL) {
        deckLink->Release();
        deckLink = NULL;
    }

    if (deckLinkIterator != NULL) {
        deckLinkIterator->Release();
    }

    if (oc != NULL) {
        /* Finalize the container; oc is only non-NULL after the header was
         * (or was about to be) written, so the trailer is safe here. */
        av_write_trailer(oc);
        if (!(fmt->flags & AVFMT_NOFILE)) {
            /* close the output file */
            avio_close(oc->pb);
        }
    }

    return exitStatus;
}
/*
 * Create the muxing context and, if the container supports audio, an audio
 * stream configured for 16-bit-equivalent stereo at the emulator sample rate.
 * Returns MRET_OK, MRET_ERR_NOMEM, or MRET_ERR_NOCODEC; on failure `oc` is
 * freed and reset to NULL so Record() can be retried.
 */
MediaRet MediaRecorder::setup_sound_stream(const char *fname, AVOutputFormat *fmt)
{
    oc = avformat_alloc_context();
    if(!oc)
        return MRET_ERR_NOMEM;
    oc->oformat = fmt;
    strncpy(oc->filename, fname, sizeof(oc->filename) - 1);
    oc->filename[sizeof(oc->filename) - 1] = 0;
    if(fmt->audio_codec == CODEC_ID_NONE)
        return MRET_OK;
    AVCodecContext *ctx;
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53,10,0)
    aud_st = av_new_stream(oc, 1);
#else
    aud_st = avformat_new_stream(oc, NULL);
#endif
    if(!aud_st) {
        avformat_free_context(oc);
        oc = NULL;
        return MRET_ERR_NOMEM;
    }
    AVCodec *codec = avcodec_find_encoder(fmt->audio_codec);
    ctx = aud_st->codec;
    ctx->codec_id = fmt->audio_codec;
    ctx->codec_type = AVMEDIA_TYPE_AUDIO;
    // Some encoders don't like int16_t (SAMPLE_FMT_S16)
    ctx->sample_fmt = codec->sample_fmts[0];
    // This was changed in the initial ffmpeg 3.0 update,
    // but shouldn't (as far as I'm aware) cause problems with older versions
    ctx->bit_rate = 128000; // arbitrary; in case we're generating mp3
    ctx->sample_rate = soundGetSampleRate();
    ctx->channels = 2;
    ctx->time_base.den = 60;  /* 60 fps emulation tick */
    ctx->time_base.num = 1;
    if(fmt->flags & AVFMT_GLOBALHEADER)
        ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,6,0)
    if(!codec || avcodec_open(ctx, codec)) {
#else
    if(!codec || avcodec_open2(ctx, codec, NULL)) {
#endif
        avformat_free_context(oc);
        oc = NULL;
        return MRET_ERR_NOCODEC;
    }
    return MRET_OK;
}

/*
 * Add a video stream of w×h at 60 fps to the already-allocated `oc`.
 * `d` is the source bit depth (16/24/32) and selects the RGB pixel format;
 * if the chosen encoder does not support that RGB format, a swscale
 * converter to the encoder's best-supported format is set up in `converter`
 * with `convpic` as the conversion target frame.
 */
MediaRet MediaRecorder::setup_video_stream(const char *fname, int w, int h, int d)
{
    AVCodecContext *ctx;
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53,10,0)
    vid_st = av_new_stream(oc, 0);
#else
    vid_st = avformat_new_stream(oc, NULL);
#endif
    if(!vid_st) {
        avformat_free_context(oc);
        oc = NULL;
        return MRET_ERR_NOMEM;
    }
    ctx = vid_st->codec;
    ctx->codec_id = oc->oformat->video_codec;
    ctx->codec_type = AVMEDIA_TYPE_VIDEO;
    ctx->width = w;
    ctx->height = h;
    ctx->time_base.den = 60;
    ctx->time_base.num = 1;
    // dunno if any of these help; some output just looks plain crappy
    // will have to investigate further
    ctx->bit_rate = 400000;
    ctx->gop_size = 12;
    ctx->max_b_frames = 2;
    switch(d) {
    case 16:
        // FIXME: test & make endian-neutral
        pixfmt = PIX_FMT_RGB565LE;
        break;
    case 24:
        pixfmt = PIX_FMT_RGB24;
        break;
    case 32:
    default: // should never be anything else
        pixfmt = PIX_FMT_RGBA;
        break;
    }
    ctx->pix_fmt = pixfmt;
    pixsize = d >> 3;          /* bytes per pixel */
    linesize = pixsize * w;
    /* NOTE(review): duplicate of the max_b_frames assignment above — looks
     * redundant but harmless; left as-is. */
    ctx->max_b_frames = 2;
    if(oc->oformat->flags & AVFMT_GLOBALHEADER)
        ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
    AVCodec *codec = avcodec_find_encoder(oc->oformat->video_codec);
    // make sure RGB is supported (mostly not)
    if(codec->pix_fmts) {
        const enum PixelFormat *p;
#if LIBAVCODEC_VERSION_MAJOR < 55
        int64_t mask = 0;
#endif
        for(p = codec->pix_fmts; *p != -1; p++) {
            // may get complaints about 1LL; thus the cast
#if LIBAVCODEC_VERSION_MAJOR < 55
            mask |= ((int64_t)1) << *p;
#endif
            if(*p == pixfmt)
                break;
        }
        if(*p == -1) {
            // if not supported, use a converter to the next best format
            // this is swscale, the converter used by the output demo
#if LIBAVCODEC_VERSION_MAJOR < 55
            enum PixelFormat dp = (PixelFormat)avcodec_find_best_pix_fmt(mask, pixfmt, 0, NULL);
#else
#if LIBAVCODEC_VERSION_MICRO >= 100
            // FFmpeg
            enum AVPixelFormat dp = avcodec_find_best_pix_fmt_of_list(codec->pix_fmts, pixfmt, 0, NULL);
#else
            // Libav
            enum AVPixelFormat dp = avcodec_find_best_pix_fmt2(codec->pix_fmts, pixfmt, 0, NULL);
#endif
#endif
            if(dp == -1)
                dp = codec->pix_fmts[0];
            if(!(convpic = avcodec_alloc_frame()) ||
               avpicture_alloc((AVPicture *)convpic, dp, w, h) < 0) {
                avformat_free_context(oc);
                oc = NULL;
                return MRET_ERR_NOMEM;
            }
#if LIBSWSCALE_VERSION_INT < AV_VERSION_INT(0, 12, 0)
            converter = sws_getContext(w, h, pixfmt, w, h, dp, SWS_BICUBIC, NULL, NULL, NULL);
#else
            converter = sws_alloc_context();
            // what a convoluted, inefficient way to set options
            av_opt_set_int(converter, "sws_flags", SWS_BICUBIC, 0);
            av_opt_set_int(converter, "srcw", w, 0);
            av_opt_set_int(converter, "srch", h, 0);
            av_opt_set_int(converter, "dstw", w, 0);
            av_opt_set_int(converter, "dsth", h, 0);
            av_opt_set_int(converter, "src_format", pixfmt, 0);
            av_opt_set_int(converter, "dst_format", dp, 0);
            sws_init_context(converter, NULL, NULL);
#endif
            ctx->pix_fmt = dp;
        }
    }
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,6,0)
    if(!codec || avcodec_open(ctx, codec)) {
#else
    if(!codec || avcodec_open2(ctx, codec, NULL)) {
#endif
        avformat_free_context(oc);
        oc = NULL;
        return MRET_ERR_NOCODEC;
    }
    return MRET_OK;
}

/*
 * Allocate the audio/video working buffers sized to the opened codecs,
 * open the output file, and write the container header.
 * `audio_buf2` is only needed when the codec frame size does not divide
 * evenly into one emulation tick's worth of samples.
 */
MediaRet MediaRecorder::finish_setup(const char *fname)
{
    if(audio_buf)
        free(audio_buf);
    if(audio_buf2)
        free(audio_buf2);
    audio_buf2 = NULL;
    in_audio_buf2 = 0;
    if(aud_st) {
        /* codec frame size in bytes (stereo 16-bit = 4 bytes/sample) */
        frame_len = aud_st->codec->frame_size * 4;
        /* samples per 1/60 s tick, in bytes */
        sample_len = soundGetSampleRate() * 4 / 60;
        switch(aud_st->codec->codec_id) {
        case CODEC_ID_PCM_S16LE:
        case CODEC_ID_PCM_S16BE:
        case CODEC_ID_PCM_U16LE:
        case CODEC_ID_PCM_U16BE:
            /* raw PCM has no fixed frame size; write a tick at a time */
            frame_len = sample_len;
        }
        audio_buf = (uint8_t *)malloc(AUDIO_BUF_LEN);
        if(!audio_buf) {
            avformat_free_context(oc);
            oc = NULL;
            return MRET_ERR_NOMEM;
        }
        if(frame_len != sample_len && (frame_len > sample_len || sample_len % frame_len)) {
            audio_buf2 = (uint16_t *)malloc(frame_len);
            if(!audio_buf2) {
                avformat_free_context(oc);
                oc = NULL;
                return MRET_ERR_NOMEM;
            }
        }
    } else
        audio_buf = NULL;
    if(video_buf)
        free(video_buf);
    if(vid_st) {
        video_buf = (uint8_t *)malloc(VIDEO_BUF_LEN);
        if(!video_buf) {
            avformat_free_context(oc);
            oc = NULL;
            return MRET_ERR_NOMEM;
        }
    } else {
        video_buf = NULL;
    }
    if(!(oc->oformat->flags & AVFMT_NOFILE)) {
        if(avio_open(&oc->pb, fname, AVIO_FLAG_WRITE) < 0) {
            avformat_free_context(oc);
            oc = NULL;
            return MRET_ERR_FERR;
        }
    }
    avformat_write_header(oc, NULL);
    return MRET_OK;
}

/*
 * Start an audio+video recording to `fname`; the container is guessed from
 * the filename, falling back to AVI.  Fails with MRET_ERR_RECORDING if a
 * recording is already in progress.
 */
MediaRet MediaRecorder::Record(const char *fname, int width, int height, int depth)
{
    if(oc)
        return MRET_ERR_RECORDING;
    aud_st = vid_st = NULL;
    AVOutputFormat *fmt = av_guess_format(NULL, fname, NULL);
    if(!fmt)
        fmt = av_guess_format("avi", NULL, NULL);
    if(!fmt || fmt->video_codec == CODEC_ID_NONE)
        return MRET_ERR_FMTGUESS;
    MediaRet ret;
    if((ret = setup_sound_stream(fname, fmt)) == MRET_OK &&
       (ret = setup_video_stream(fname, width, height, depth)) == MRET_OK)
        ret = finish_setup(fname);
    return ret;
}

/*
 * Start an audio-only recording to `fname`; the container is guessed from
 * the filename, falling back to WAV.
 */
MediaRet MediaRecorder::Record(const char *fname)
{
    if(oc)
        return MRET_ERR_RECORDING;
    aud_st = vid_st = NULL;
    AVOutputFormat *fmt = av_guess_format(NULL, fname, NULL);
    if(!fmt)
        fmt = av_guess_format("wav", NULL, NULL);
    if(!fmt || fmt->audio_codec == CODEC_ID_NONE)
        return MRET_ERR_FMTGUESS;
    MediaRet ret;
    if((ret = setup_sound_stream(fname, fmt)) == MRET_OK)
        ret = finish_setup(fname);
    return ret;
}

/*
 * Stop recording: flush any buffered partial audio frame, write the
 * container trailer, and release every buffer/context owned by this object.
 * Safe to call when not recording.
 */
void MediaRecorder::Stop()
{
    if(oc) {
        if(in_audio_buf2)
            AddFrame((uint16_t *)0);   /* flush pending partial audio frame */
        av_write_trailer(oc);
        avformat_free_context(oc);
        oc = NULL;
    }
    if(audio_buf) {
        free(audio_buf);
        audio_buf = NULL;
    }
    if(video_buf) {
        free(video_buf);
        video_buf = NULL;
    }
    if(audio_buf2) {
        free(audio_buf2);
        audio_buf2 = NULL;
    }
    if(convpic) {
        avpicture_free((AVPicture *)convpic);
        av_free(convpic);
        convpic = NULL;
    }
    if(converter) {
        sws_freeContext(converter);
        converter = NULL;
    }
}

MediaRecorder::~MediaRecorder()
{
    Stop();
}

// Still needs updating for avcodec_encode_video2
/*
 * Encode and mux one video frame.  `vid` points at the raw emulator frame
 * including its border, which is stripped according to the depth-dependent
 * offsets below; the frame is converted via swscale when `converter` is set.
 * Returns MRET_OK (including when the encoder buffered the frame) or an
 * error code after tearing down `oc`.
 */
MediaRet MediaRecorder::AddFrame(const uint8_t *vid)
{
    if(!oc || !vid_st)
        return MRET_OK;
    AVCodecContext *ctx = vid_st->codec;
    AVPacket pkt;
#if LIBAVCODEC_VERSION_MAJOR > 56
    int ret, got_packet = 0;
#endif
    // strip borders. inconsistent between depths for some reason
    // but fortunately consistent between gb/gba.
    /* NOTE(review): this switch has no default, so tbord/rbord stay
     * uninitialized if pixsize is ever outside {2,3,4} — setup_video_stream
     * only produces those values, but verify before changing pixel depths. */
    int tbord, rbord;
    switch(pixsize) {
    case 2:
        // 16-bit: 2 @ right, 1 @ top
        tbord = 1;
        rbord = 2;
        break;
    case 3:
        // 24-bit: no border
        tbord = rbord = 0;
        break;
    case 4:
        // 32-bit: 1 @ right, 1 @ top
        tbord = 1;
        rbord = 1;
        break;
    }
    avpicture_fill((AVPicture *)pic, (uint8_t *)vid + tbord * (linesize + pixsize * rbord),
                   (PixelFormat)pixfmt, ctx->width + rbord, ctx->height);
    // satisfy stupid sws_scale()'s integrity check
    pic->data[1] = pic->data[2] = pic->data[3] = pic->data[0];
    pic->linesize[1] = pic->linesize[2] = pic->linesize[3] = pic->linesize[0];
    AVFrame *f = pic;
    if(converter) {
        sws_scale(converter, pic->data, pic->linesize, 0, ctx->height,
                  convpic->data, convpic->linesize);
        f = convpic;
    }
    av_init_packet(&pkt);
    pkt.stream_index = vid_st->index;
    if(oc->oformat->flags & AVFMT_RAWPICTURE) {
        // this won't work due to border
        // not sure what formats set this, anyway
        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.data = f->data[0];
        pkt.size = linesize * ctx->height;
    } else {
#if LIBAVCODEC_VERSION_MAJOR > 56
        pkt.data = video_buf;
        pkt.size = VIDEO_BUF_LEN;
        f->format = ctx->pix_fmt;
        f->width = ctx->width;
        f->height = ctx->height;
        ret = avcodec_encode_video2(ctx, &pkt, f, &got_packet);
        if(!ret && got_packet && ctx->coded_frame) {
            ctx->coded_frame->pts = pkt.pts;
            ctx->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY);
        }
#else
        pkt.size = avcodec_encode_video(ctx, video_buf, VIDEO_BUF_LEN, f);
#endif
        if(!pkt.size)
            return MRET_OK;   /* encoder buffered the frame; nothing to mux yet */
        if(ctx->coded_frame && ctx->coded_frame->pts != AV_NOPTS_VALUE)
            pkt.pts = av_rescale_q(ctx->coded_frame->pts, ctx->time_base, vid_st->time_base);
        if(pkt.size > VIDEO_BUF_LEN) {
            avformat_free_context(oc);
            oc = NULL;
            return MRET_ERR_BUFSIZE;
        }
        if(ctx->coded_frame->key_frame)
            pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.data = video_buf;
    }
    if(av_interleaved_write_frame(oc, &pkt) < 0) {
        avformat_free_context(oc);
        oc = NULL;
        // yeah, err might not be a file error, but if it isn't, it's a
        // coding error rather than a user-controllable error
        // and better resolved using debugging
        return MRET_ERR_FERR;
    }
    return MRET_OK;
}

#if LIBAVCODEC_VERSION_MAJOR > 56
/* FFmpeg depricated avcodec_encode_audio.
 * It was removed completely in 3.0.
 * This will at least get audio recording *working* */
/*
 * Compatibility shim emulating the old avcodec_encode_audio() API on top of
 * avcodec_encode_audio2().  Pass samples == NULL to flush the encoder.
 * Returns the avcodec_encode_audio2() result (0 on success, negative error).
 */
static inline int MediaRecorderEncodeAudio(AVCodecContext *ctx,
                                           AVPacket *pkt,
                                           uint8_t *buf, int buf_size,
                                           const short *samples)
{
    AVFrame *frame;
    av_init_packet(pkt);
    int ret, samples_size, got_packet = 0;
    pkt->data = buf;
    pkt->size = buf_size;
    if (samples) {
        /* NOTE(review): doubled assignment looks like a typo (harmless), and
         * `frame` is never av_frame_free()'d — leaks one frame per call;
         * confirm before relying on long recordings. */
        frame = frame = av_frame_alloc();
        if (ctx->frame_size) {
            frame->nb_samples = ctx->frame_size;
        } else {
            /* PCM-style codecs report no frame size; derive it from buf_size */
            frame->nb_samples = (int64_t)buf_size * 8 / (av_get_bits_per_sample(ctx->codec_id) * ctx->channels);
        }
        frame->format = ctx->sample_fmt;
        frame->channel_layout = ctx->channel_layout;
        samples_size = av_samples_get_buffer_size(NULL, ctx->channels, frame->nb_samples, ctx->sample_fmt, 1);
        avcodec_fill_audio_frame(frame, ctx->channels, ctx->sample_fmt, (const uint8_t *)samples, samples_size, 1);
        //frame->pts = AV_NOPTS_VALUE;
    } else {
        frame = NULL;
    }
    ret = avcodec_encode_audio2(ctx, pkt, frame, &got_packet);
    if (!ret && got_packet && ctx->coded_frame) {
        ctx->coded_frame->pts = pkt->pts;
        ctx->coded_frame->key_frame = !!(pkt->flags & AV_PKT_FLAG_KEY);
    }
    if (frame && frame->extended_data != frame->data)
        av_freep(&frame->extended_data);
    return ret;
}
static int open_output_file(const char *filename) { AVStream *out_stream; AVStream *in_stream; AVCodecContext *dec_ctx, *enc_ctx; AVCodec *encoder; int ret; unsigned int i; ofmt_ctx = NULL; avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename); if (!ofmt_ctx) { av_log(NULL, AV_LOG_ERROR, "Could not create output context\n"); return AVERROR_UNKNOWN; } for (i = 0; i < ifmt_ctx->nb_streams; i++) { out_stream = avformat_new_stream(ofmt_ctx, NULL); if (!out_stream) { av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n"); return AVERROR_UNKNOWN; } in_stream = ifmt_ctx->streams[i]; dec_ctx = in_stream->codec; enc_ctx = out_stream->codec; if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) { /* in this example, we choose transcoding to same codec */ encoder = avcodec_find_encoder(dec_ctx->codec_id); if (!encoder) { av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n"); return AVERROR_INVALIDDATA; } /* In this example, we transcode to same properties (picture size, * sample rate etc.). 
These properties can be changed for output * streams easily using filters */ if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) { enc_ctx->height = dec_ctx->height; enc_ctx->width = dec_ctx->width; enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio; /* take first format from list of supported formats */ enc_ctx->pix_fmt = encoder->pix_fmts[0]; /* video time_base can be set to whatever is handy and supported by encoder */ enc_ctx->time_base = dec_ctx->time_base; } else { enc_ctx->sample_rate = dec_ctx->sample_rate; enc_ctx->channel_layout = dec_ctx->channel_layout; enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout); /* take first format from list of supported formats */ enc_ctx->sample_fmt = encoder->sample_fmts[0]; enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate}; } /* Third parameter can be used to pass settings to encoder */ ret = avcodec_open2(enc_ctx, encoder, NULL); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i); return ret; } } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) { av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i); return AVERROR_INVALIDDATA; } else { /* if this stream must be remuxed */ ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec, ifmt_ctx->streams[i]->codec); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n"); return ret; } } if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; } av_dump_format(ofmt_ctx, 0, filename, 1); if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) { ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename); return ret; } } /* init muxer, write output file header */ ret = avformat_write_header(ofmt_ctx, NULL); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n"); return ret; } return 0; }
/*
 * Start an FFmpeg render output for the given render settings.
 * Chooses the container/codecs from rd->ffcodecdata, validates DV
 * constraints, allocates video/audio streams, opens the file and writes
 * the container header.  On success stores the context in the module-level
 * `outfile` and returns 1; on failure reports via `reports` and returns 0.
 *
 * Fixed: every error path after the AVFormatContext and options dictionary
 * are created now frees both (the DV validation returns previously leaked
 * `opts`, and `of` was leaked on all error paths).
 */
static int start_ffmpeg_impl(struct RenderData *rd, int rectx, int recty, ReportList *reports)
{
    /* Handle to the output file */
    AVFormatContext *of;
    AVOutputFormat *fmt;
    AVDictionary *opts = NULL;
    char name[256], error[1024];
    const char **exts;

    ffmpeg_type = rd->ffcodecdata.type;
    ffmpeg_codec = rd->ffcodecdata.codec;
    ffmpeg_audio_codec = rd->ffcodecdata.audio_codec;
    ffmpeg_video_bitrate = rd->ffcodecdata.video_bitrate;
    ffmpeg_audio_bitrate = rd->ffcodecdata.audio_bitrate;
    ffmpeg_gop_size = rd->ffcodecdata.gop_size;
    ffmpeg_autosplit = rd->ffcodecdata.flags & FFMPEG_AUTOSPLIT_OUTPUT;

    /* Determine the correct filename */
    BKE_ffmpeg_filepath_get(name, rd);
    PRINT("Starting output to %s(ffmpeg)...\n"
          " Using type=%d, codec=%d, audio_codec=%d,\n"
          " video_bitrate=%d, audio_bitrate=%d,\n"
          " gop_size=%d, autosplit=%d\n"
          " render width=%d, render height=%d\n",
          name, ffmpeg_type, ffmpeg_codec, ffmpeg_audio_codec,
          ffmpeg_video_bitrate, ffmpeg_audio_bitrate, ffmpeg_gop_size,
          ffmpeg_autosplit, rectx, recty);

    exts = get_file_extensions(ffmpeg_type);
    if (!exts) {
        BKE_report(reports, RPT_ERROR, "No valid formats found");
        return 0;
    }
    fmt = av_guess_format(NULL, exts[0], NULL);
    if (!fmt) {
        BKE_report(reports, RPT_ERROR, "No valid formats found");
        return 0;
    }

    of = avformat_alloc_context();
    if (!of) {
        BKE_report(reports, RPT_ERROR, "Error opening output file");
        return 0;
    }

    of->oformat = fmt;
    of->packet_size = rd->ffcodecdata.mux_packet_size;
    if (ffmpeg_audio_codec != AV_CODEC_ID_NONE) {
        ffmpeg_dict_set_int(&opts, "muxrate", rd->ffcodecdata.mux_rate);
    }
    else {
        av_dict_set(&opts, "muxrate", "0", 0);
    }

    ffmpeg_dict_set_int(&opts, "preload", (int)(0.5 * AV_TIME_BASE));

    of->max_delay = (int)(0.7 * AV_TIME_BASE);

    fmt->audio_codec = ffmpeg_audio_codec;

    BLI_strncpy(of->filename, name, sizeof(of->filename));
    /* set the codec to the user's selection */
    switch (ffmpeg_type) {
        case FFMPEG_AVI:
        case FFMPEG_MOV:
        case FFMPEG_MKV:
            fmt->video_codec = ffmpeg_codec;
            break;
        case FFMPEG_OGG:
            fmt->video_codec = AV_CODEC_ID_THEORA;
            break;
        case FFMPEG_DV:
            fmt->video_codec = AV_CODEC_ID_DVVIDEO;
            break;
        case FFMPEG_MPEG1:
            fmt->video_codec = AV_CODEC_ID_MPEG1VIDEO;
            break;
        case FFMPEG_MPEG2:
            fmt->video_codec = AV_CODEC_ID_MPEG2VIDEO;
            break;
        case FFMPEG_H264:
            fmt->video_codec = AV_CODEC_ID_H264;
            break;
        case FFMPEG_XVID:
            fmt->video_codec = AV_CODEC_ID_MPEG4;
            break;
        case FFMPEG_FLV:
            fmt->video_codec = AV_CODEC_ID_FLV1;
            break;
        case FFMPEG_MPEG4:
        default:
            fmt->video_codec = AV_CODEC_ID_MPEG4;
            break;
    }

    /* DV has hard frame-size/framerate requirements; reject early. */
    if (fmt->video_codec == AV_CODEC_ID_DVVIDEO) {
        if (rectx != 720) {
            BKE_report(reports, RPT_ERROR, "Render width has to be 720 pixels for DV!");
            av_dict_free(&opts);
            avformat_free_context(of);
            return 0;
        }
        if (rd->frs_sec != 25 && recty != 480) {
            BKE_report(reports, RPT_ERROR, "Render height has to be 480 pixels for DV-NTSC!");
            av_dict_free(&opts);
            avformat_free_context(of);
            return 0;
        }
        if (rd->frs_sec == 25 && recty != 576) {
            BKE_report(reports, RPT_ERROR, "Render height has to be 576 pixels for DV-PAL!");
            av_dict_free(&opts);
            avformat_free_context(of);
            return 0;
        }
    }

    if (ffmpeg_type == FFMPEG_DV) {
        fmt->audio_codec = AV_CODEC_ID_PCM_S16LE;
        if (ffmpeg_audio_codec != AV_CODEC_ID_NONE &&
            rd->ffcodecdata.audio_mixrate != 48000 &&
            rd->ffcodecdata.audio_channels != 2)
        {
            BKE_report(reports, RPT_ERROR, "FFMPEG only supports 48khz / stereo audio for DV!");
            av_dict_free(&opts);
            avformat_free_context(of);
            return 0;
        }
    }

    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        video_stream = alloc_video_stream(rd, fmt->video_codec, of, rectx, recty, error, sizeof(error));
        PRINT("alloc video stream %p\n", video_stream);
        if (!video_stream) {
            if (error[0])
                BKE_report(reports, RPT_ERROR, error);
            else
                BKE_report(reports, RPT_ERROR, "Error initializing video stream");
            av_dict_free(&opts);
            avformat_free_context(of);
            return 0;
        }
    }

    if (ffmpeg_audio_codec != AV_CODEC_ID_NONE) {
        audio_stream = alloc_audio_stream(rd, fmt->audio_codec, of, error, sizeof(error));
        if (!audio_stream) {
            if (error[0])
                BKE_report(reports, RPT_ERROR, error);
            else
                BKE_report(reports, RPT_ERROR, "Error initializing audio stream");
            av_dict_free(&opts);
            avformat_free_context(of);
            return 0;
        }
    }
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&of->pb, name, AVIO_FLAG_WRITE) < 0) {
            BKE_report(reports, RPT_ERROR, "Could not open file for writing");
            av_dict_free(&opts);
            avformat_free_context(of);
            return 0;
        }
    }
    if (avformat_write_header(of, NULL) < 0) {
        BKE_report(reports, RPT_ERROR,
                   "Could not initialize streams, probably unsupported codec combination");
        av_dict_free(&opts);
        avio_close(of->pb);
        avformat_free_context(of);
        return 0;
    }

    outfile = of;
    av_dump_format(of, 0, name, 1);
    av_dict_free(&opts);

    return 1;
}
/**
 * Construct an FFmpeg-backed audio file writer.
 *
 * Allocates the libavformat muxer context, maps the requested container and
 * codec to FFmpeg ids, configures and opens the encoder, sizes the
 * input/output staging buffers and writes the container header.
 *
 * \param filename Path of the file to write.
 * \param specs    Device specs (rate, channels, sample format) of the input.
 * \param format   Requested container (indexes the formats[] table below).
 * \param codec    Requested codec; AUD_CODEC_PCM is refined by specs.format.
 * \param bitrate  Target encoder bitrate in bits per second.
 *
 * Throws AUD_Exception (via AUD_THROW) on any failure; the nested try/catch
 * blocks below unwind exactly the resources acquired so far, so their order
 * must mirror the acquisition order.
 */
AUD_FFMPEGWriter::AUD_FFMPEGWriter(std::string filename, AUD_DeviceSpecs specs, AUD_Container format, AUD_Codec codec, unsigned int bitrate) :
	m_position(0),
	m_specs(specs),
	m_input_samples(0)
{
	// Container short names indexed by AUD_Container; index 0 is "invalid".
	static const char* formats[] = { NULL, "ac3", "flac", "matroska", "mp2", "mp3", "ogg", "wav" };

	m_formatCtx = avformat_alloc_context();
	if (!m_formatCtx)
		AUD_THROW(AUD_ERROR_FFMPEG, context_error);

	// NOTE(review): unbounded strcpy into the fixed-size filename field —
	// assumes filename fits (AVFormatContext::filename is 1024 bytes); confirm.
	strcpy(m_formatCtx->filename, filename.c_str());

	// Guess the muxer from the container name and/or the file extension.
	m_outputFmt = m_formatCtx->oformat = av_guess_format(formats[format], filename.c_str(), NULL);
	if (!m_outputFmt) {
		avformat_free_context(m_formatCtx);
		AUD_THROW(AUD_ERROR_FFMPEG, context_error);
	}

	// Map the requested codec to an FFmpeg codec id; PCM is selected by the
	// device sample format so no conversion loss occurs.
	switch(codec)
	{
	case AUD_CODEC_AAC:
		m_outputFmt->audio_codec = AV_CODEC_ID_AAC;
		break;
	case AUD_CODEC_AC3:
		m_outputFmt->audio_codec = AV_CODEC_ID_AC3;
		break;
	case AUD_CODEC_FLAC:
		m_outputFmt->audio_codec = AV_CODEC_ID_FLAC;
		break;
	case AUD_CODEC_MP2:
		m_outputFmt->audio_codec = AV_CODEC_ID_MP2;
		break;
	case AUD_CODEC_MP3:
		m_outputFmt->audio_codec = AV_CODEC_ID_MP3;
		break;
	case AUD_CODEC_PCM:
		switch(specs.format)
		{
		case AUD_FORMAT_U8:
			m_outputFmt->audio_codec = AV_CODEC_ID_PCM_U8;
			break;
		case AUD_FORMAT_S16:
			m_outputFmt->audio_codec = AV_CODEC_ID_PCM_S16LE;
			break;
		case AUD_FORMAT_S24:
			m_outputFmt->audio_codec = AV_CODEC_ID_PCM_S24LE;
			break;
		case AUD_FORMAT_S32:
			m_outputFmt->audio_codec = AV_CODEC_ID_PCM_S32LE;
			break;
		case AUD_FORMAT_FLOAT32:
			m_outputFmt->audio_codec = AV_CODEC_ID_PCM_F32LE;
			break;
		case AUD_FORMAT_FLOAT64:
			m_outputFmt->audio_codec = AV_CODEC_ID_PCM_F64LE;
			break;
		default:
			m_outputFmt->audio_codec = AV_CODEC_ID_NONE;
			break;
		}
		break;
	case AUD_CODEC_VORBIS:
		m_outputFmt->audio_codec = AV_CODEC_ID_VORBIS;
		break;
	default:
		m_outputFmt->audio_codec = AV_CODEC_ID_NONE;
		break;
	}

	try
	{
		if(m_outputFmt->audio_codec == AV_CODEC_ID_NONE)
			AUD_THROW(AUD_ERROR_SPECS, codec_error);

		m_stream = avformat_new_stream(m_formatCtx, NULL);
		if(!m_stream)
			AUD_THROW(AUD_ERROR_FFMPEG, stream_error);

		// Fill in the encoder parameters; timestamps tick once per sample.
		m_codecCtx = m_stream->codec;
		m_codecCtx->codec_id = m_outputFmt->audio_codec;
		m_codecCtx->codec_type = AVMEDIA_TYPE_AUDIO;
		m_codecCtx->bit_rate = bitrate;
		m_codecCtx->sample_rate = int(m_specs.rate);
		m_codecCtx->channels = m_specs.channels;
		m_codecCtx->time_base.num = 1;
		m_codecCtx->time_base.den = m_codecCtx->sample_rate;

		// Choose the float->target sample converter matching the device format.
		switch(m_specs.format)
		{
		case AUD_FORMAT_U8:
			m_convert = AUD_convert_float_u8;
			m_codecCtx->sample_fmt = AV_SAMPLE_FMT_U8;
			break;
		case AUD_FORMAT_S16:
			m_convert = AUD_convert_float_s16;
			m_codecCtx->sample_fmt = AV_SAMPLE_FMT_S16;
			break;
		case AUD_FORMAT_S32:
			m_convert = AUD_convert_float_s32;
			m_codecCtx->sample_fmt = AV_SAMPLE_FMT_S32;
			break;
		case AUD_FORMAT_FLOAT32:
			m_convert = AUD_convert_copy<float>;
			m_codecCtx->sample_fmt = AV_SAMPLE_FMT_FLT;
			break;
		case AUD_FORMAT_FLOAT64:
			m_convert = AUD_convert_float_double;
			m_codecCtx->sample_fmt = AV_SAMPLE_FMT_DBL;
			break;
		default:
			AUD_THROW(AUD_ERROR_FFMPEG, format_error);
		}

		try
		{
			// Containers like MP4/MKV want codec parameters in the global header.
			if(m_formatCtx->oformat->flags & AVFMT_GLOBALHEADER)
				m_codecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;

			AVCodec* codec = avcodec_find_encoder(m_codecCtx->codec_id);
			if(!codec)
				AUD_THROW(AUD_ERROR_FFMPEG, codec_error);

			if(codec->sample_fmts) {
				// Check if the preferred sample format for this codec is supported.
				const enum AVSampleFormat *p = codec->sample_fmts;
				for(; *p != -1; p++)
				{
					if(*p == m_stream->codec->sample_fmt)
						break;
				}
				if(*p == -1)
				{
					// Sample format incompatible with codec. Defaulting to a format known to work.
					m_stream->codec->sample_fmt = codec->sample_fmts[0];
				}
			}

			if(avcodec_open2(m_codecCtx, codec, NULL))
				AUD_THROW(AUD_ERROR_FFMPEG, codec_error);

			m_output_buffer.resize(FF_MIN_BUFFER_SIZE);
			// Staging buffer element size: the larger of packed and device sample size.
			int samplesize = AUD_MAX(AUD_SAMPLE_SIZE(m_specs), AUD_DEVICE_SAMPLE_SIZE(m_specs));

			// frame_size <= 1 means the codec accepts arbitrary frame sizes
			// (raw PCM); derive a frame size from the output buffer instead.
			if(m_codecCtx->frame_size <= 1) {
				m_input_size = FF_MIN_BUFFER_SIZE * 8 / m_codecCtx->bits_per_coded_sample / m_codecCtx->channels;
				m_input_buffer.resize(m_input_size * samplesize);
			}
			else
			{
				m_input_buffer.resize(m_codecCtx->frame_size * samplesize);
				m_input_size = m_codecCtx->frame_size;
			}

#ifdef FFMPEG_HAVE_ENCODE_AUDIO2
			// Newer encode API: pre-build the reusable AVFrame describing the
			// staging buffer layout.
			m_frame = av_frame_alloc();
			if (!m_frame)
				AUD_THROW(AUD_ERROR_FFMPEG, codec_error);
			avcodec_get_frame_defaults(m_frame);
			m_frame->linesize[0] = m_input_size * samplesize;
			m_frame->format = m_codecCtx->sample_fmt;
			m_frame->nb_samples = m_input_size;
# ifdef FFMPEG_HAVE_AVFRAME_SAMPLE_RATE
			m_frame->sample_rate = m_codecCtx->sample_rate;
# endif
# ifdef FFMPEG_HAVE_FRAME_CHANNEL_LAYOUT
			m_frame->channel_layout = m_codecCtx->channel_layout;
# endif
			m_sample_size = av_get_bytes_per_sample(m_codecCtx->sample_fmt);
			m_frame_pts = 0;
			// Planar codecs need an extra buffer to deinterleave into.
			m_deinterleave = av_sample_fmt_is_planar(m_codecCtx->sample_fmt);
			if(m_deinterleave)
				m_deinterleave_buffer.resize(m_input_size * m_codecCtx->channels * m_sample_size);
#endif

			try
			{
				if(avio_open(&m_formatCtx->pb, filename.c_str(), AVIO_FLAG_WRITE))
					AUD_THROW(AUD_ERROR_FILE, file_error);

				avformat_write_header(m_formatCtx, NULL);
			}
			catch(AUD_Exception&)
			{
				// Undo codec open before propagating.
				avcodec_close(m_codecCtx);
				av_freep(&m_formatCtx->streams[0]->codec);
				throw;
			}
		}
		catch(AUD_Exception&)
		{
			// Undo stream creation before propagating.
			av_freep(&m_formatCtx->streams[0]);
			throw;
		}
	}
	catch(AUD_Exception&)
	{
		// Undo context allocation before propagating.
		av_free(m_formatCtx);
		throw;
	}
}
/* media file output */
/**
 * Example program entry point: generate a synthetic audio and a synthetic
 * video stream, encode them, and mux both into the output file whose
 * container is guessed from its extension (falling back to MPEG).
 *
 * usage: prog output_file [-flags <value>]
 * Returns 1 on any setup failure, 0 on success.
 */
int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc < 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* Forward a single "-flags value" option pair to the muxer dictionary
     * (argv[2]+1 strips the leading '-'). */
    if (argc > 3 && !strcmp(argv[2], "-flags")) {
        av_dict_set(&opt, argv[2]+1, argv[3], 0);
    }

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);
    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename, av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n", av_err2str(ret));
        return 1;
    }

    /* Interleave by always encoding the stream that is behind in time. */
    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
            (!encode_audio ||
             av_compare_ts(video_st.next_pts, video_st.st->codec->time_base,
                           audio_st.next_pts, audio_st.st->codec->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !write_audio_frame(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_closep(&oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
int main(int argc, char **argv) { int ret = 0; AVPacket dec_pkt; AVCodec *enc_codec; if (argc != 4) { fprintf(stderr, "Usage: %s <input file> <encode codec> <output file>\n" "The output format is guessed according to the file extension.\n" "\n", argv[0]); return -1; } ret = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, NULL, NULL, 0); if (ret < 0) { fprintf(stderr, "Failed to create a VAAPI device. Error code: %s\n", av_err2str(ret)); return -1; } if ((ret = open_input_file(argv[1])) < 0) goto end; if (!(enc_codec = avcodec_find_encoder_by_name(argv[2]))) { fprintf(stderr, "Could not find encoder '%s'\n", argv[2]); ret = -1; goto end; } if ((ret = (avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, argv[3]))) < 0) { fprintf(stderr, "Failed to deduce output format from file extension. Error code: " "%s\n", av_err2str(ret)); goto end; } if (!(encoder_ctx = avcodec_alloc_context3(enc_codec))) { ret = AVERROR(ENOMEM); goto end; } ret = avio_open(&ofmt_ctx->pb, argv[3], AVIO_FLAG_WRITE); if (ret < 0) { fprintf(stderr, "Cannot open output file. " "Error code: %s\n", av_err2str(ret)); goto end; } /* read all packets and only transcoding video */ while (ret >= 0) { if ((ret = av_read_frame(ifmt_ctx, &dec_pkt)) < 0) break; if (video_stream == dec_pkt.stream_index) ret = dec_enc(&dec_pkt, enc_codec); av_packet_unref(&dec_pkt); } /* flush decoder */ dec_pkt.data = NULL; dec_pkt.size = 0; ret = dec_enc(&dec_pkt, enc_codec); av_packet_unref(&dec_pkt); /* flush encoder */ ret = encode_write(NULL); /* write the trailer for output stream */ av_write_trailer(ofmt_ctx); end: avformat_close_input(&ifmt_ctx); avformat_close_input(&ofmt_ctx); avcodec_free_context(&decoder_ctx); avcodec_free_context(&encoder_ctx); av_buffer_unref(&hw_device_ctx); return ret; }
/**
 * Open and configure the audio muxer for an AudioOutputFile.
 *
 * Guesses the container from psz_name, opens the output file, creates the
 * audio stream from the already-selected encoder (p_aoutf->p_codec) and the
 * configured codec context (p_aoutf->p_codec_ctx), opens the codec and
 * writes the container header.
 *
 * @param p_aoutf  audio output state; p_aoutf->p_fmt is (re)initialized here
 * @param psz_name output file name, also used for container guessing
 * @return 0 on success, -1 on failure
 */
int dc_ffmpeg_audio_muxer_open(AudioOutputFile * p_aoutf, char * psz_name)
{
	AVOutputFormat * p_output_fmt;
	AVStream * p_audio_stream;
	AVCodecContext * p_audio_codec_ctx = p_aoutf->p_codec_ctx;

	p_aoutf->p_fmt = NULL;

	/* Find output format */
	p_output_fmt = av_guess_format(NULL, psz_name, NULL);
	if (!p_output_fmt) {
		fprintf(stderr, "Cannot find suitable output format\n");
		return -1;
	}

	p_aoutf->p_fmt = avformat_alloc_context();
	if (!p_aoutf->p_fmt) {
		fprintf(stderr, "Cannot allocate memory for the output format context\n");
		return -1;
	}

	p_aoutf->p_fmt->oformat = p_output_fmt;
	strcpy(p_aoutf->p_fmt->filename, psz_name);

	/* Open the output file, unless the muxer does its own I/O */
	if (!(p_output_fmt->flags & AVFMT_NOFILE)) {
		/* BUGFIX: URL_WRONLY belongs to the long-removed URL API;
		 * avio_open() takes AVIO_FLAG_WRITE for write access. */
		if (avio_open(&p_aoutf->p_fmt->pb, psz_name, AVIO_FLAG_WRITE) < 0) {
			fprintf(stderr, "Cannot open '%s'\n", psz_name);
			return -1;
		}
	}

	p_audio_stream = avformat_new_stream(p_aoutf->p_fmt, p_aoutf->p_codec);
	if (!p_audio_stream) {
		/* BUGFIX: message previously said "video" for this audio stream */
		fprintf(stderr, "Cannot create output audio stream\n");
		return -1;
	}

	/* Mirror the encoder settings into the muxer's stream codec context */
	p_audio_stream->codec->codec_id = p_aoutf->p_codec->id;
	p_audio_stream->codec->codec_type = AVMEDIA_TYPE_AUDIO;
	p_audio_stream->codec->bit_rate = p_audio_codec_ctx->bit_rate;
	p_audio_stream->codec->sample_rate = p_audio_codec_ctx->sample_rate;
	p_audio_stream->codec->channels = p_audio_codec_ctx->channels;
	p_audio_stream->codec->sample_fmt = AV_SAMPLE_FMT_S16;

	/* Open the audio codec */
	if (avcodec_open2(p_audio_stream->codec, p_aoutf->p_codec, NULL) < 0) {
		/* BUGFIX: message previously said "video codec" */
		fprintf(stderr, "Cannot open output audio codec\n");
		return -1;
	}

	/* BUGFIX: the header-write result was previously ignored */
	if (avformat_write_header(p_aoutf->p_fmt, NULL) < 0) {
		fprintf(stderr, "Cannot write header for '%s'\n", psz_name);
		return -1;
	}

	return 0;
}
/**
 * Example entry point (pre-avformat_alloc_output_context2 API style):
 * guess the container from the output file name (falling back to MPEG),
 * add default-format audio/video streams, encode synthetic content and mux
 * it into the file. Returns 1 on any setup failure, 0 on success.
 */
int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }
    filename = argv[1];

    /* Autodetect the output format from the name. default is MPEG. */
    fmt = av_guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        fmt = av_guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        return 1;
    }

    /* Allocate the output media context. */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        return 1;
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_video_stream(&video_st, oc, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_audio_stream(&audio_st, oc, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, &video_st);
    if (have_audio)
        open_audio(oc, &audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

    /* Write the stream header, if any. */
    avformat_write_header(oc, NULL);

    /* Interleave by always encoding the stream that is behind in time. */
    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
            (!encode_audio ||
             av_compare_ts(video_st.next_pts, video_st.enc->time_base,
                           audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !process_audio_stream(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
/**
 * Split in_filename into a video-only output (re-encoded to H.264) and an
 * audio-only output file.
 *
 * NOTE(review): this function has several structural problems flagged inline
 * below (stream-scan 'break', per-packet stream/codec setup, double encoder
 * open, trailer without header on the audio context). They are annotated
 * rather than fixed because the control flow is too entangled to restructure
 * safely without tests.
 *
 * @return 0 on success, -1 on error.
 */
int demux(const char *in_filename, const char *out_filename_v, const char *out_filename_a)
{
	AVOutputFormat *ofmt_a = NULL, *ofmt_v = NULL;
	// Input AVFormatContext and Output AVFormatContext
	AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx_a = NULL, *ofmt_ctx_v = NULL;
	AVPacket pkt, enc_pkt;
	int ret, i;
	int video_index = -1, audio_index = -1;
	int frame_index = 0;

	av_register_all();
	// Input
	if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
		printf("Could not open input file.");
		goto end;
	}
	if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
		printf("Failed to retrieve input stream information");
		goto end;
	}
	// Output
	avformat_alloc_output_context2(&ofmt_ctx_v, NULL, NULL, out_filename_v);
	if (!ofmt_ctx_v) {
		printf("Could not create output context.\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt_v = ofmt_ctx_v->oformat;
	avformat_alloc_output_context2(&ofmt_ctx_a, NULL, NULL, out_filename_a);
	if (!ofmt_ctx_a) {
		printf("Could not create output context\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt_a = ofmt_ctx_a->oformat;

	// Map each input stream to the matching output context.
	for (i = 0; i < ifmt_ctx->nb_streams; i++) {
		// Create output AVStream according to input AVStream
		AVFormatContext *ofmt_ctx;
		AVStream *in_stream = ifmt_ctx->streams[i];
		AVStream *out_stream = NULL;
		if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			video_index = i;
			out_stream = avformat_new_stream(ofmt_ctx_v, in_stream->codec->codec);
			ofmt_ctx = ofmt_ctx_v;
		} else if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
			audio_index = i;
			out_stream = avformat_new_stream(ofmt_ctx_a, in_stream->codec->codec);
			ofmt_ctx = ofmt_ctx_a;
		} else {
			// NOTE(review): 'break' abandons the scan at the first stream that
			// is neither audio nor video (e.g. subtitles), so any A/V stream
			// after it is never mapped — 'continue' was probably intended.
			break;
		}
		if (!out_stream) {
			printf("Failed allocating output stream\n");
			ret = AVERROR_UNKNOWN;
			goto end;
		}
		// Copy the settings of AVCodecContext
		if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
			printf(
					"Failed to copy context from input to output stream codec context\n");
			goto end;
		}
		out_stream->codec->codec_tag = 0;
		if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}
	// Open output file
	if (!(ofmt_v->flags & AVFMT_NOFILE)) {
		if (avio_open(&ofmt_ctx_v->pb, out_filename_v, AVIO_FLAG_WRITE) < 0) {
			printf("Could not open output file '%s'", out_filename_v);
			goto end;
		}
	}
	if (!(ofmt_a->flags & AVFMT_NOFILE)) {
		if (avio_open(&ofmt_ctx_a->pb, out_filename_a, AVIO_FLAG_WRITE) < 0) {
			printf("Could not open output file '%s'", out_filename_a);
			goto end;
		}
	}
	// Write file header
	if (avformat_write_header(ofmt_ctx_v, NULL) < 0) {
		printf("Error occurred when opening video output file\n");
		goto end;
	}
	// NOTE(review): the audio header write is disabled, yet av_write_trailer()
	// is still called on ofmt_ctx_a below — writing a trailer without a header
	// is invalid; confirm the intent for the audio output.
	// if (avformat_write_header(ofmt_ctx_a, NULL) < 0) {
	// printf("Error occurred when opening audio output file\n");
	// goto end;
	// }
#if USE_H264BSF
	AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif
	while (1) {
		AVFormatContext *ofmt_ctx;
		AVStream *in_stream, *out_stream;
		AVCodecContext *dec_ctx = NULL, *enc_ctx = NULL;
		AVCodec *dec = NULL, *encoder = NULL;
		AVFrame *frame = NULL;
		int got_frame;
		// Get an AVPacket
		if (av_read_frame(ifmt_ctx, &pkt) < 0)
			break;
		in_stream = ifmt_ctx->streams[pkt.stream_index];
		if (pkt.stream_index == video_index) {
			ofmt_ctx = ofmt_ctx_v;
			// NOTE(review): this appends a brand-new stream to the output for
			// *every* video packet; stream creation belongs in the setup loop
			// above (the stream created there is shadowed here).
			out_stream = avformat_new_stream(ofmt_ctx, NULL);
			/* find decoder for the stream */
			dec_ctx = in_stream->codec;
			dec = avcodec_find_decoder(dec_ctx->codec_id);
			if (!dec) {
				fprintf(stderr, "Failed to find %s codec\n",
						av_get_media_type_string(AVMEDIA_TYPE_VIDEO));
				return AVERROR(EINVAL);
			}
			/* Open decoder */
			// NOTE(review): the decoder is re-opened for every packet and
			// closed again at the bottom of this branch — it should be opened
			// once before the loop.
			int ret = avcodec_open2(dec_ctx, dec, NULL);
			if (ret < 0) {
				av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
				return ret;
			}
			// decoder is MPEG-4 part 2
			printf("decoder is %s\n", dec->long_name);
			// NOTE
			frame = av_frame_alloc();
			ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &pkt);
			if (ret < 0) {
				av_frame_free(&frame);
				av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
				break;
			}
			// printf("frame duration is %d\n", frame->pkt_duration);
			// encode
			encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
			// avcodec_copy_context(enc_ctx, dec_ctx);
			enc_ctx = avcodec_alloc_context3(encoder);
			// NOTE(review): enc_ctx leaks if the encoder is missing; check
			// 'encoder' before allocating the context.
			if (!encoder) {
				av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
				return AVERROR_INVALIDDATA;
			}
			enc_ctx->height = dec_ctx->height;
			enc_ctx->width = dec_ctx->width;
			enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
			enc_ctx->pix_fmt = encoder->pix_fmts[0];
			enc_ctx->time_base = dec_ctx->time_base;
			//enc_ctx->time_base.num = 1;
			//enc_ctx->time_base.den = 25;
			// Required options for H.264; encoding fails without them.
			enc_ctx->me_range = 16;
			enc_ctx->max_qdiff = 4;
			enc_ctx->qmin = 10;
			enc_ctx->qmax = 51;
			enc_ctx->qcompress = 0.6;
			enc_ctx->refs = 3;
			// NOTE(review): bit_rate is in bits/s — 1500 is almost certainly
			// meant to be 1500000 (1.5 Mbit/s); confirm.
			enc_ctx->bit_rate = 1500;
			ret = avcodec_open2(enc_ctx, encoder, NULL);
			if (ret < 0) {
				av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
				return ret;
			}
			// NOTE(review): options set after avcodec_open2() have no effect,
			// and the encoder is opened a *second* time just below — set the
			// preset first, then open exactly once.
			av_opt_set(enc_ctx->priv_data, "preset", "slow", 0);
			// AVOutputFormat *formatOut = av_guess_format(NULL, out_filename_v, NULL);
			enc_pkt.data = NULL;
			enc_pkt.size = 0;
			av_init_packet(&enc_pkt);
			ret = avcodec_open2(enc_ctx, encoder, NULL);
			if (ret < 0) {
				av_log(NULL, AV_LOG_ERROR, "Failed to open encoder for stream #%u\n", i);
				return ret;
			}
			ret = avcodec_encode_video2(enc_ctx, &enc_pkt, frame, &got_frame);
			printf("demo is %s\n", "hello");
			av_frame_free(&frame);
			avcodec_close(enc_ctx);
			avcodec_close(dec_ctx);
			// printf("Write Video Packet. size:%d\tpts:%lld\n", pkt.size, pkt.pts);
#if USE_H264BSF
			av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
		} else {
			continue;
		}
		// Convert PTS/DTS
		enc_pkt.pts = av_rescale_q_rnd(enc_pkt.pts, in_stream->time_base,
				out_stream->time_base,
				(AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		enc_pkt.dts = av_rescale_q_rnd(enc_pkt.dts, in_stream->time_base,
				out_stream->time_base,
				(AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		enc_pkt.duration = av_rescale_q(enc_pkt.duration, in_stream->time_base,
				out_stream->time_base);
		//
		enc_pkt.pos = -1;
		enc_pkt.stream_index = video_index;
		if (av_interleaved_write_frame(ofmt_ctx, &enc_pkt) < 0) {
			printf("Error muxing packet\n");
			break;
		}
		av_free_packet(&enc_pkt);
		av_free_packet(&pkt);
		frame_index++;
	}
#if USE_H264BSF
	av_bitstream_filter_close(h264bsfc);
#endif
	// Write file trailer
	av_write_trailer(ofmt_ctx_a);
	av_write_trailer(ofmt_ctx_v);
end:
	avformat_close_input(&ifmt_ctx);
	/* close output */
	if (ofmt_ctx_a && !(ofmt_a->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx_a->pb);
	if (ofmt_ctx_v && !(ofmt_v->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx_v->pb);
	avformat_free_context(ofmt_ctx_a);
	avformat_free_context(ofmt_ctx_v);
	if (ret < 0 && ret != AVERROR_EOF) {
		printf("Error occurred.\n");
		return -1;
	}
	return 0;
}
mfxStatus FFmpeg_Writer_Init( const char *strFileName, mfxU32 videoType, mfxU16 nBitRate, mfxU16 nDstWidth, mfxU16 nDstHeight, mfxU16 GopRefDist, mfxU8* SPSbuf, int SPSbufsize, mfxU8* PPSbuf, int PPSbufsize) { MSDK_CHECK_POINTER(strFileName, MFX_ERR_NULL_PTR); g_GopRefDist = GopRefDist; // Initialize libavcodec, and register all codecs and formats avcodec_register_all(); av_register_all(); avformat_network_init(); //not necessary for file-only transcode // Get default output format config based on selected container type g_pFmt = av_guess_format(FORMAT_SHORT_NAME, FORMAT_FILENAME, NULL); // Sub title processing ignored g_pFmt->subtitle_codec = AV_CODEC_ID_NONE; switch (videoType) { case MFX_CODEC_AVC: g_pFmt->video_codec = AV_CODEC_ID_H264; break; case MFX_CODEC_MPEG2: g_pFmt->video_codec = AV_CODEC_ID_MPEG2VIDEO; break; default: printf("Unsupported video format\n"); return MFX_ERR_UNSUPPORTED; } if (!g_pFmt) { printf("FFMPEG: Could not find suitable output format\n"); return MFX_ERR_UNSUPPORTED; } // Allocate the output media context g_pFormatCtxMux = avformat_alloc_context(); if (!g_pFormatCtxMux) { printf("FFMPEG: avformat_alloc_context error\n"); return MFX_ERR_UNSUPPORTED; } g_pFormatCtxMux->oformat = g_pFmt; sprintf_s(g_pFormatCtxMux->filename, "%s", strFileName); if (g_pFmt->video_codec == CODEC_ID_NONE) return MFX_ERR_UNSUPPORTED; g_pVideoStream = avformat_new_stream(g_pFormatCtxMux, NULL); if (!g_pVideoStream) { printf("FFMPEG: Could not alloc video stream\n"); return MFX_ERR_UNKNOWN; } g_videoStreamMuxIdx = g_pVideoStream->index; AVCodecContext *c = g_pVideoStream->codec; c->codec_id = g_pFmt->video_codec; c->codec_type = AVMEDIA_TYPE_VIDEO; c->bit_rate = nBitRate*1000; c->width = nDstWidth; c->height = nDstHeight; // time base: this is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented. for fixed-fps content, // timebase should be 1/framerate and timestamp increments should be identically 1. 
c->time_base.den = g_pFormatCtx->streams[g_videoStreamIdx]->r_frame_rate.num; c->time_base.num = g_pFormatCtx->streams[g_videoStreamIdx]->r_frame_rate.den; // Some formats want stream headers to be separate if(g_pFormatCtxMux->oformat->flags & AVFMT_GLOBALHEADER) c->flags |= CODEC_FLAG_GLOBAL_HEADER; #ifdef PROCESS_AUDIO g_pFmt->audio_codec = g_pAudioStream->codec->codec_id; // Create new audio stream for the container g_pAudioStreamMux = avformat_new_stream(g_pFormatCtxMux, NULL); if (!g_pAudioStreamMux) { printf("FFMPEG: Could not alloc audio stream\n"); return MFX_ERR_UNKNOWN; } g_audioStreamMuxIdx = g_pAudioStreamMux->index; // Copy audio codec config from input stream to output stream AVCodecContext *ca = g_pAudioStreamMux->codec; ca->codec_id = g_pAudioStream->codec->codec_id; ca->codec_type = AVMEDIA_TYPE_AUDIO; ca->sample_rate = g_pAudioStream->codec->sample_rate; ca->channels = g_pAudioStream->codec->channels; ca->bit_rate = g_pAudioStream->codec->bit_rate; ca->sample_fmt = g_pAudioStream->codec->sample_fmt; ca->frame_size = g_pAudioStream->codec->frame_size; ca->bits_per_coded_sample = g_pAudioStream->codec->bits_per_coded_sample; ca->channel_layout = g_pAudioStream->codec->channel_layout; ca->time_base = g_pAudioStream->codec->time_base; g_pAudioStreamMux->time_base = g_pAudioStream->codec->time_base; // Extra data apparently contains essential channel config info (must be copied!) 
ca->extradata_size = g_pAudioStream->codec->extradata_size; g_audioExtraData = (uint8_t*)av_malloc(ca->extradata_size); ca->extradata = g_audioExtraData; memcpy(ca->extradata, g_pAudioStream->codec->extradata, ca->extradata_size); // Some formats want stream headers to be separate if(g_pFormatCtxMux->oformat->flags & AVFMT_GLOBALHEADER) ca->flags |= CODEC_FLAG_GLOBAL_HEADER; #endif // Open the output container file if (avio_open(&g_pFormatCtxMux->pb, g_pFormatCtxMux->filename, AVIO_FLAG_WRITE) < 0) { printf("FFMPEG: Could not open '%s'\n", g_pFormatCtxMux->filename); return MFX_ERR_UNKNOWN; } g_pExtDataBuffer = (mfxU8*)av_malloc(SPSbufsize + PPSbufsize); if(!g_pExtDataBuffer) { printf("FFMPEG: could not allocate required buffer\n"); return MFX_ERR_UNKNOWN; } memcpy(g_pExtDataBuffer, SPSbuf, SPSbufsize); memcpy(g_pExtDataBuffer + SPSbufsize, PPSbuf, PPSbufsize); // Codec "extradata" conveys the H.264 stream SPS and PPS info (MPEG2: sequence header is housed in SPS buffer, PPS buffer is empty) c->extradata = g_pExtDataBuffer; c->extradata_size = SPSbufsize + PPSbufsize; // Write container header if(avformat_write_header(g_pFormatCtxMux, NULL)) { printf("FFMPEG: avformat_write_header error!\n"); return MFX_ERR_UNKNOWN; } return MFX_ERR_NONE; }
static int write_packet(AVFormatContext *s, AVPacket *pkt) { VideoData *img = s->priv_data; AVIOContext *pb[3]; char filename[1024]; AVCodecContext *codec = s->streams[ pkt->stream_index ]->codec; int i; if (!img->is_pipe) { if (av_get_frame_filename(filename, sizeof(filename), img->path, img->img_number) < 0 && img->img_number > 1) { av_log(s, AV_LOG_ERROR, "Could not get frame filename number %d from pattern '%s'\n", img->img_number, img->path); return AVERROR(EIO); } for(i = 0; i < 3; i++) { if (avio_open(&pb[i], filename, AVIO_WRONLY) < 0) { av_log(s, AV_LOG_ERROR, "Could not open file : %s\n", filename); return AVERROR(EIO); } if(!img->split_planes) break; filename[ strlen(filename) - 1 ] = 'U' + i; } } else { pb[0] = s->pb; } if(img->split_planes) { int ysize = codec->width * codec->height; avio_write(pb[0], pkt->data , ysize); avio_write(pb[1], pkt->data + ysize, (pkt->size - ysize) / 2); avio_write(pb[2], pkt->data + ysize + (pkt->size - ysize) / 2, (pkt->size - ysize) / 2); avio_flush(pb[1]); avio_flush(pb[2]); avio_close(pb[1]); avio_close(pb[2]); } else { if(av_str2id(img_tags, s->filename) == CODEC_ID_JPEG2000) { AVStream *st = s->streams[0]; if(st->codec->extradata_size > 8 && AV_RL32(st->codec->extradata + 4) == MKTAG('j', 'p', '2', 'h')) { if(pkt->size < 8 || AV_RL32(pkt->data + 4) != MKTAG('j', 'p', '2', 'c')) goto error; avio_wb32(pb[0], 12); ffio_wfourcc(pb[0], "jP "); avio_wb32(pb[0], 0x0D0A870A); // signature avio_wb32(pb[0], 20); ffio_wfourcc(pb[0], "ftyp"); ffio_wfourcc(pb[0], "jp2 "); avio_wb32(pb[0], 0); ffio_wfourcc(pb[0], "jp2 "); avio_write(pb[0], st->codec->extradata, st->codec->extradata_size); } else if(pkt->size < 8 || (!st->codec->extradata_size && AV_RL32(pkt->data + 4) != MKTAG('j', 'P', ' ', ' '))) // signature { error: av_log(s, AV_LOG_ERROR, "malformated jpeg2000 codestream\n"); return -1; } } avio_write(pb[0], pkt->data, pkt->size); } avio_flush(pb[0]); if (!img->is_pipe) { avio_close(pb[0]); } img->img_number++; return 0; }
bool muxerFlv::open(const char *file, ADM_videoStream *s,uint32_t nbAudioTrack,ADM_audioStream **a) { uint32_t fcc=s->getFCC(); bool r=true; char *fileTitle=NULL; if(fourCC::check(fcc,(uint8_t *)"FLV1") || isVP6Compatible(fcc)) { }else { GUI_Error_HIG("Unsupported","Only FLV1 & VP6 supported for video"); return false; } if(nbAudioTrack) for(int i=0;i<nbAudioTrack;i++) { uint32_t acc=a[i]->getInfo()->encoding; if(acc!=WAV_MP2 && acc!=WAV_MP3 && acc!=WAV_AAC) { GUI_Error_HIG("Unsupported","Only AAC & mpegaudio supported for audio"); return false; } uint32_t fq=a[i]->getInfo()->frequency; if(fq!=44100 &&fq!=22050 && fq!=11025) { GUI_Error_HIG("Unsupported","Only 44.1, 22.050 and 11.025 kHz supported"); return false; } } if(false==setupMuxer("flv",file)) { printf("[FLV] Failed to open muxer\n"); return false; } if(initVideo(s)==false) { printf("[FLV] Failed to init video\n"); return false; } AVCodecContext *c = video_st->codec; AVDictionary *dict = NULL; rescaleFps(s->getAvgFps1000(),&(c->time_base)); c->gop_size=15; if(initAudio(nbAudioTrack,a)==false) { printf("[FLV] Failed to init audio\n"); return false; } int er=avio_open(&(oc->pb), file, AVIO_FLAG_WRITE); if (er) { ADM_error("[Flv]: Failed to open file :%s, er=%d\n",file,er); r=false; goto finish; } char buf[64]; snprintf(buf, sizeof(buf), "%d", AV_TIME_BASE / 10); av_dict_set(&dict, "preload", buf, 0); av_dict_set(&dict, "max_delay", buf, 0); av_dict_set(&dict, "muxrate", "10080000", 0); if (avformat_write_header(oc, &dict) < 0) { printf("[Flv Muxer] Muxer rejected the parameters\n"); r=false; goto finish; } initialized=true; finish: vStream=s; aStreams=a; nbAStreams=nbAudioTrack; return r; }
/**
 * Create the output container and an AAC encoder for it.
 *
 * Opens the file for writing, allocates the format context, guesses the
 * container from the file extension, adds a single audio stream backed by
 * the AAC encoder and opens that encoder. The input sample rate is reused
 * so no resampling is required later.
 *
 * @param filename              Path of the file to create.
 * @param input_codec_context   Decoder context of the source (sample rate).
 * @param output_format_context Receives the new format context (owned by
 *                              the caller on success; NULL on failure).
 * @param output_codec_context  Receives the stream's encoder context.
 * @return 0 on success, a negative AVERROR (or AVERROR_EXIT) on failure.
 */
static int open_output_file(const char *filename,
                            AVCodecContext *input_codec_context,
                            AVFormatContext **output_format_context,
                            AVCodecContext **output_codec_context)
{
    AVIOContext *io_ctx = NULL;
    AVStream *st = NULL;
    AVCodec *encoder = NULL;
    AVCodecContext *enc = NULL;
    int err;

    /* The file must be writable before anything else is set up. */
    err = avio_open(&io_ctx, filename, AVIO_FLAG_WRITE);
    if (err < 0) {
        fprintf(stderr, "Could not open output file '%s' (error '%s')\n",
                filename, get_error_text(err));
        return err;
    }

    /* Container context; its pb member carries the open file handle. */
    *output_format_context = avformat_alloc_context();
    if (!*output_format_context) {
        fprintf(stderr, "Could not allocate output format context\n");
        return AVERROR(ENOMEM);
    }
    (*output_format_context)->pb = io_ctx;

    /* Pick the muxer from the file extension. */
    (*output_format_context)->oformat = av_guess_format(NULL, filename, NULL);
    if (!(*output_format_context)->oformat) {
        fprintf(stderr, "Could not find output file format\n");
        goto fail;
    }

    av_strlcpy((*output_format_context)->filename, filename,
               sizeof((*output_format_context)->filename));

    encoder = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (!encoder) {
        fprintf(stderr, "Could not find an AAC encoder.\n");
        goto fail;
    }

    st = avformat_new_stream(*output_format_context, encoder);
    if (!st) {
        fprintf(stderr, "Could not create new stream\n");
        err = AVERROR(ENOMEM);
        goto fail;
    }

    /* Keep the encoder context reachable for the caller. */
    enc = st->codec;
    *output_codec_context = enc;

    /* Basic encoder parameters; the input sample rate is reused on purpose. */
    enc->channels       = OUTPUT_CHANNELS;
    enc->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
    enc->sample_rate    = input_codec_context->sample_rate;
    enc->sample_fmt     = encoder->sample_fmts[0];
    enc->bit_rate       = OUTPUT_BIT_RATE;

    /* The native AAC encoder is still flagged experimental. */
    enc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

    /* Container timestamps tick once per audio sample. */
    st->time_base.den = input_codec_context->sample_rate;
    st->time_base.num = 1;

    /* Formats such as MP4 need the codec parameters in a global header. */
    if ((*output_format_context)->oformat->flags & AVFMT_GLOBALHEADER)
        enc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    err = avcodec_open2(enc, encoder, NULL);
    if (err < 0) {
        fprintf(stderr, "Could not open output codec (error '%s')\n",
                get_error_text(err));
        goto fail;
    }

    return 0;

fail:
    avio_closep(&(*output_format_context)->pb);
    avformat_free_context(*output_format_context);
    *output_format_context = NULL;
    return err < 0 ? err : AVERROR_EXIT;
}
/*
 * Parse an HLS (Apple HTTP Live Streaming) M3U playlist.
 *
 * The playlist at 'url' may be a master playlist (listing variants via
 * #EXT-X-STREAM-INF) or a media playlist (listing segments via #EXTINF).
 * 'var' is the variant being (re)loaded, or NULL — a variant is created
 * lazily the first time a media-playlist tag is seen.
 * 'in' may be an already-open AVIOContext; if NULL the function opens
 * (and later closes) 'url' itself.
 *
 * Returns 0 on success or a negative AVERROR on failure.
 */
static int parse_playlist(AppleHTTPContext *c, const char *url,
                          struct variant *var, AVIOContext *in)
{
    int ret = 0, duration = 0, is_segment = 0, is_variant = 0, bandwidth = 0;
    enum KeyType key_type = KEY_NONE;
    uint8_t iv[16] = "";
    int has_iv = 0;
    char key[MAX_URL_SIZE];
    char line[1024];
    const char *ptr;
    int close_in = 0;

    /* Open the playlist ourselves only if the caller didn't pass a stream. */
    if (!in) {
        close_in = 1;
        if ((ret = avio_open(&in, url, AVIO_FLAG_READ)) < 0)
            return ret;
    }

    /* A valid playlist must start with the #EXTM3U magic line. */
    read_chomp_line(in, line, sizeof(line));
    if (strcmp(line, "#EXTM3U")) {
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    /* Reloading an existing variant: drop its old segment list first. */
    if (var) {
        free_segment_list(var);
        var->finished = 0;
    }

    while (!in->eof_reached) {
        read_chomp_line(in, line, sizeof(line));
        if (av_strstart(line, "#EXT-X-STREAM-INF:", &ptr)) {
            /* Master playlist: remember the bandwidth; the variant itself is
             * created when the following URI line is read. */
            struct variant_info info = {{0}};
            is_variant = 1;
            ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_variant_args,
                               &info);
            bandwidth = atoi(info.bandwidth);
        } else if (av_strstart(line, "#EXT-X-KEY:", &ptr)) {
            /* Encryption key declaration; applies to subsequent segments. */
            struct key_info info = {{0}};
            ff_parse_key_value(ptr, (ff_parse_key_val_cb) handle_key_args,
                               &info);
            key_type = KEY_NONE;
            has_iv = 0;
            if (!strcmp(info.method, "AES-128"))
                key_type = KEY_AES_128;
            /* Optional explicit IV, given as a 0x-prefixed hex string. */
            if (!strncmp(info.iv, "0x", 2) || !strncmp(info.iv, "0X", 2)) {
                ff_hex_to_data(iv, info.iv + 2);
                has_iv = 1;
            }
            av_strlcpy(key, info.uri, sizeof(key));
        } else if (av_strstart(line, "#EXT-X-TARGETDURATION:", &ptr)) {
            /* Media-playlist tag: create the implicit variant if needed. */
            if (!var) {
                var = new_variant(c, 0, url, NULL);
                if (!var) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
            }
            var->target_duration = atoi(ptr);
        } else if (av_strstart(line, "#EXT-X-MEDIA-SEQUENCE:", &ptr)) {
            if (!var) {
                var = new_variant(c, 0, url, NULL);
                if (!var) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
            }
            var->start_seq_no = atoi(ptr);
        } else if (av_strstart(line, "#EXT-X-ENDLIST", &ptr)) {
            /* VOD playlist: no further reloads needed. */
            if (var)
                var->finished = 1;
        } else if (av_strstart(line, "#EXTINF:", &ptr)) {
            /* Duration of the segment whose URI follows on the next line. */
            is_segment = 1;
            duration = atoi(ptr);
        } else if (av_strstart(line, "#", NULL)) {
            continue;               /* unrecognized tag/comment: skip */
        } else if (line[0]) {
            /* A non-tag, non-empty line is a URI for the pending variant
             * and/or segment declared by the preceding tag(s). */
            if (is_variant) {
                if (!new_variant(c, bandwidth, line, url)) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                is_variant = 0;
                bandwidth = 0;
            }
            if (is_segment) {
                struct segment *seg;
                if (!var) {
                    var = new_variant(c, 0, url, NULL);
                    if (!var) {
                        ret = AVERROR(ENOMEM);
                        goto fail;
                    }
                }
                seg = av_malloc(sizeof(struct segment));
                if (!seg) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                seg->duration = duration;
                seg->key_type = key_type;
                if (has_iv) {
                    memcpy(seg->iv, iv, sizeof(iv));
                } else {
                    /* Per the HLS spec, the default IV is the segment's media
                     * sequence number, big-endian in the low 4 bytes. */
                    int seq = var->start_seq_no + var->n_segments;
                    memset(seg->iv, 0, sizeof(seg->iv));
                    AV_WB32(seg->iv + 12, seq);
                }
                /* Key and segment URIs may be relative to the playlist URL. */
                ff_make_absolute_url(seg->key, sizeof(seg->key), url, key);
                ff_make_absolute_url(seg->url, sizeof(seg->url), url, line);
                dynarray_add(&var->segments, &var->n_segments, seg);
                is_segment = 0;
            }
        }
    }
    /* Record the reload time so live playlists know when to refresh. */
    if (var)
        var->last_load_time = av_gettime();
fail:
    if (close_in)
        avio_close(in);
    return ret;
}
void MP4Encoder::EncodeStart() { //1. 注册所有组件 av_register_all(); //2. 初始化输出码流的AVFormatContext avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, this->mp4Path); //3. 打开待输出的视频文件 if (avio_open(&pFormatCtx->pb, this->mp4Path, AVIO_FLAG_READ_WRITE)) { LOGE("open output file failed"); return; } //4. 初始化视频码流 pStream = avformat_new_stream(pFormatCtx, NULL); if (pStream == NULL) { LOGE("allocating output stream failed"); return; } //5. 寻找编码器并打开编码器 pCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4); if (!pCodec) { LOGE("could not find encoder"); return; } //6. 分配编码器并设置参数 pCodecCtx = avcodec_alloc_context3(pCodec); pCodecCtx->codec_id = pCodec->id; pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO; pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P; pCodecCtx->width = height; pCodecCtx->height = width; pCodecCtx->time_base.num = 1; pCodecCtx->time_base.den = 25; pCodecCtx->bit_rate = 400000; pCodecCtx->gop_size = 12; //将AVCodecContext的成员复制到AVCodecParameters结构体 avcodec_parameters_from_context(pStream->codecpar, pCodecCtx); av_stream_set_r_frame_rate(pStream, {1, 25}); //7. 打开编码器 if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) { LOGE("open encoder fail!"); return; } //输出格式信息 av_dump_format(pFormatCtx, 0, this->mp4Path, 1); //初始化帧 pFrame = av_frame_alloc(); pFrame->width = pCodecCtx->width; pFrame->height = pCodecCtx->height; pFrame->format = pCodecCtx->pix_fmt; int bufferSize = av_image_get_buffer_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, 1); pFrameBuffer = (uint8_t *) av_malloc(bufferSize); av_image_fill_arrays(pFrame->data, pFrame->linesize, pFrameBuffer, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, 1); AVDictionary *opt = 0; //H.264 if (pCodecCtx->codec_id == AV_CODEC_ID_H264) { av_dict_set_int(&opt, "video_track_timescale", 25, 0); av_dict_set(&opt, "preset", "slow", 0); av_dict_set(&opt, "tune", "zerolatency", 0); } //8. 写文件头 avformat_write_header(pFormatCtx, &opt); //创建已编码帧 av_new_packet(&avPacket, bufferSize * 3); //标记正在转换 this->transform = true; }
/*
 * Allocate and initialize an FFmpeg-based proxy (preview) writer.
 *
 * Creates an AVI/MJPEG output for the given source stream 'st', opens the
 * encoder and output file, and writes the container header. Returns the new
 * context, or NULL/0 on failure (NOTE(review): failure paths mix 'return NULL'
 * and 'return 0' — same value, inconsistent style; 'quality' is accepted but
 * unused here).
 *
 * Uses the legacy FFmpeg API (av_new_stream, av_set_parameters,
 * avcodec_open, av_write_header) — pre-0.11 era.
 */
static struct proxy_output_ctx * alloc_proxy_output_ffmpeg(
	struct anim * anim,
	AVStream * st, int proxy_size, int width,
	int height, int quality)
{
	struct proxy_output_ctx * rv = MEM_callocN(
		sizeof(struct proxy_output_ctx), "alloc_proxy_output");
	char fname[FILE_MAXDIR+FILE_MAXFILE];

	// JPEG requires this
	width = round_up(width, 8);
	height = round_up(height, 8);

	rv->proxy_size = proxy_size;
	rv->anim = anim;
	/* Derive the on-disk proxy path and make sure its directory exists. */
	get_proxy_filename(rv->anim, rv->proxy_size, fname, TRUE);
	BLI_make_existing_file(fname);

	rv->of = avformat_alloc_context();
	rv->of->oformat = av_guess_format("avi", NULL, NULL);

	BLI_snprintf(rv->of->filename, sizeof(rv->of->filename), "%s", fname);

	fprintf(stderr, "Starting work on proxy: %s\n", rv->of->filename);

	/* One MJPEG video stream; legacy API exposes the codec context on the stream. */
	rv->st = av_new_stream(rv->of, 0);
	rv->c = rv->st->codec;
	rv->c->codec_type = AVMEDIA_TYPE_VIDEO;
	rv->c->codec_id = CODEC_ID_MJPEG;
	rv->c->width = width;
	rv->c->height = height;

	rv->of->oformat->video_codec = rv->c->codec_id;
	rv->codec = avcodec_find_encoder(rv->c->codec_id);

	if (!rv->codec) {
		fprintf(stderr, "No ffmpeg MJPEG encoder available? "
			"Proxy not built!\n");
		av_free(rv->of);
		return NULL;
	}

	/* Prefer the encoder's first supported pixel format; fall back to
	 * full-range YUV420 (the usual MJPEG format). */
	if (rv->codec->pix_fmts) {
		rv->c->pix_fmt = rv->codec->pix_fmts[0];
	} else {
		rv->c->pix_fmt = PIX_FMT_YUVJ420P;
	}

	/* Preserve the source's pixel aspect ratio. */
	rv->c->sample_aspect_ratio =
		rv->st->sample_aspect_ratio =
		st->codec->sample_aspect_ratio;

	/* Fixed 25 fps time base for the proxy. */
	rv->c->time_base.den = 25;
	rv->c->time_base.num = 1;
	rv->st->time_base = rv->c->time_base;

	if (rv->of->flags & AVFMT_GLOBALHEADER) {
		rv->c->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}

	if (av_set_parameters(rv->of, NULL) < 0) {
		fprintf(stderr, "Couldn't set output parameters? "
			"Proxy not built!\n");
		av_free(rv->of);
		return 0;
	}

	if (avio_open(&rv->of->pb, fname, AVIO_FLAG_WRITE) < 0) {
		fprintf(stderr, "Couldn't open outputfile! "
			"Proxy not built!\n");
		av_free(rv->of);
		return 0;
	}

	/* NOTE(review): return value ignored; a failed open surfaces later. */
	avcodec_open(rv->c, rv->codec);

	/* Scratch buffer for encoded packets (legacy fixed-size API). */
	rv->video_buffersize = 2000000;
	rv->video_buffer = (uint8_t*)MEM_mallocN(
		rv->video_buffersize, "FFMPEG video buffer");

	rv->orig_height = st->codec->height;

	/* If the source geometry/format differs from the proxy's, set up a
	 * conversion frame (rows rounded up to 16 for safe sws output) and a
	 * software scaler context. */
	if (st->codec->width != width || st->codec->height != height
	    || st->codec->pix_fmt != rv->c->pix_fmt) {
		rv->frame = avcodec_alloc_frame();
		avpicture_fill((AVPicture*) rv->frame,
		               MEM_mallocN(avpicture_get_size(
		                               rv->c->pix_fmt,
		                               round_up(width, 16), height),
		                           "alloc proxy output frame"),
		               rv->c->pix_fmt, round_up(width, 16), height);

		rv->sws_ctx = sws_getContext(
			st->codec->width,
			st->codec->height,
			st->codec->pix_fmt,
			width, height,
			rv->c->pix_fmt,
			SWS_FAST_BILINEAR | SWS_PRINT_INFO,
			NULL, NULL, NULL);
	}

	av_write_header(rv->of);

	return rv;
}
bool muxerMP4::open(const char *file, ADM_videoStream *s,uint32_t nbAudioTrack,ADM_audioStream **a) { if(!isMpeg4Compatible(s->getFCC()) && !isH264Compatible(s->getFCC()) && !isH265Compatible(s->getFCC())) { GUI_Error_HIG("Unsupported","Only MP4Video, H264, and H265 supported for video"); return false; } if(nbAudioTrack) for(int i=0;i<nbAudioTrack;i++) { uint32_t acc=a[i]->getInfo()->encoding; if(acc!=WAV_MP2 && acc!=WAV_MP3 && acc!=WAV_AAC && acc!=WAV_AC3) { GUI_Error_HIG("Unsupported","Only AAC, AC3, and mpegaudio supported for audio"); return false; } } /* All seems fine, open stuff */ const char *f="mp4"; if(muxerConfig.muxerType==MP4_MUXER_PSP) f="psp"; if(false==setupMuxer(f,file)) { printf("[MP4] Failed to open muxer\n"); return false; } if(initVideo(s)==false) { printf("[MP4] Failed to init video\n"); return false; } AVCodecContext *c; AVRational myTimeBase; c = video_st->codec; rescaleFps(s->getAvgFps1000(),&(c->time_base)); myTimeBase=video_st->time_base=c->time_base; ADM_info("Video stream time base :%d,%d\n",video_st->time_base.num,video_st->time_base.den); c->gop_size=15; if(initAudio(nbAudioTrack,a)==false) { printf("[MP4] Failed to init audio\n"); return false; } // /audio int er = avio_open(&(oc->pb), file, AVIO_FLAG_WRITE); ADM_info("Timebase In = %d/%d\n",myTimeBase.num,myTimeBase.den); if (er) { ADM_error("[Mp4]: Failed to open file :%s, er=%d\n",file,er); return false; } AVDictionary *dict = NULL; char buf[64]; snprintf(buf, sizeof(buf), "%d", AV_TIME_BASE / 10); av_dict_set(&dict, "preload", buf, 0); av_dict_set(&dict, "max_delay", "200000", 0); av_dict_set(&dict, "muxrate", "10080000", 0); #ifndef _WIN32 // does not work on windows as the file must be opened twice at the same time av_dict_set(&dict, "movflags","faststart",0); #endif ADM_assert(avformat_write_header(oc, &dict) >= 0); ADM_info("Timebase codec = %d/%d\n",c->time_base.num,c->time_base.den); ADM_info("Timebase stream = %d/%d\n",video_st->time_base.num,video_st->time_base.den); 
if(myTimeBase.den==video_st->time_base.den && video_st->time_base.num==1) { roundup=myTimeBase.num; ADM_warning("Timebase roundup = %d\n",roundup); } av_dict_free(&dict); vStream=s; aStreams=a; nbAStreams=nbAudioTrack; initialized=true; return true; }
int main(int argc, char **argv) { double prev_segment_time = 0; unsigned int output_index = 1; AVInputFormat *ifmt; AVOutputFormat *ofmt; AVFormatContext *ic = NULL; AVFormatContext *oc; AVStream *video_st = NULL; AVStream *audio_st = NULL; char *output_filename; int video_index = -1; int audio_index = -1; unsigned int first_segment = 1; unsigned int last_segment = 0; int decode_done; char *dot; int ret; unsigned int i; int remove_file; struct sigaction act; int64_t timestamp; int opt; int longindex; char *endptr; struct options_t options; /* Usage: recorder [options] Options: -T SOCKETTIMEOUT, --sockettimeout=SOCKETTIMEOUT Socket timeout (default: 30) -B SOCKETBUFFER, --socketbuffer=SOCKETBUFFER Socket buffer in bytes(default: 1500) -v VERBOSE, --verbose=VERBOSE Verbosity level (default: info) (ops: ['emerg', 'alert', 'crit', 'err', 'warning', 'notice', 'info', 'debug']) -L LOGFILE, --logfile=LOGFILE Log file (default: ./recorder.log) /root/npvr/recorder -v info -L /var/log/npvr/recorder.udctvlive00202.log -w /mfs/npvr/storage/stream/pvr/ -C 10 -K udctvlive00202 -P http -U http://10.14.10.102:8082/stream/udctvlive00202 /mnt/mfs/npvr/storage/stream/pvr/ts/rcclive001/1359968328_10_19.ts */ static const char *optstring = "i:C:K:w:s:ovh?"; static const struct option longopts[] = { { "input", required_argument, NULL, 'i' }, { "duration", required_argument, NULL, 'C' }, { "key", required_argument, NULL, 'K' }, { "workdir", required_argument, NULL, 'w' }, { "stop", no_argument, NULL, 's' }, { "help", no_argument, NULL, 'h' }, { 0, 0, 0, 0 } }; memset(&options, 0 ,sizeof(options)); /* Set some defaults */ options.segment_duration = 10; options.stop = 0; do { opt = getopt_long(argc, argv, optstring, longopts, &longindex ); switch (opt) { case 'i': options.input_file = optarg; if (!strcmp(options.input_file, "-")) { options.input_file = "pipe:"; } break; case 'C': options.segment_duration = strtol(optarg, &endptr, 10); if (optarg == endptr || options.segment_duration < 0 || 
options.segment_duration == -LONG_MAX) { fprintf(stderr, "Segment duration time (%s) invalid\n", optarg); exit(1); } break; case 'K': options.key = optarg; break; case 'w': options.workdir = optarg; break; case 's': options.stop = 1; break; case 'h': display_usage(); break; } } while (opt != -1); /* Check required args where set*/ if (options.input_file == NULL) { fprintf(stderr, "Please specify an input file.\n"); exit(1); } if (options.key == NULL) { fprintf(stderr, "Please specify an output prefix.\n"); exit(1); } if (options.workdir == NULL) { fprintf(stderr, "Please working directory.\n"); exit(1); } avformat_network_init(); av_register_all(); output_filename = malloc(sizeof(char) * (strlen(options.workdir) + strlen(options.key) + 15)); if (!output_filename) { fprintf(stderr, "Could not allocate space for output filenames\n"); exit(1); } ifmt = av_find_input_format("mpegts"); if (!ifmt) { fprintf(stderr, "Could not find MPEG-TS demuxer\n"); exit(1); } open_context(&ic, options.input_file, options.key, ifmt, &ofmt, &oc, &video_st, &audio_st, &video_index, &audio_index); timestamp = av_gettime() / 1000000; snprintf(output_filename, strlen(options.workdir) + strlen(options.key) + 75, "%s/ts/%s/%d_%d_%u.ts", options.workdir, options.key, (int)timestamp, (int)options.segment_duration, output_index++); if (avio_open(&oc->pb, output_filename, AVIO_FLAG_WRITE) < 0) { fprintf(stderr, "Could not open '%s'\n", output_filename); exit(1); } if (avformat_write_header(oc, NULL)) { fprintf(stderr, "Could not write mpegts header to first output file\n"); exit(1); } /* Setup signals */ memset(&act, 0, sizeof(act)); act.sa_handler = &handler; sigaction(SIGINT, &act, NULL); sigaction(SIGTERM, &act, NULL); do { double segment_time = prev_segment_time; AVPacket packet; if (terminate) { break; } decode_done = av_read_frame(ic, &packet); if (decode_done < 0) { break; } if (av_dup_packet(&packet) < 0) { fprintf(stderr, "Could not duplicate packet"); av_free_packet(&packet); break; } 
// Use video stream as time base and split at keyframes. Otherwise use audio stream if (packet.stream_index == video_index && (packet.flags & AV_PKT_FLAG_KEY)) { segment_time = packet.pts * av_q2d(video_st->time_base); } else if (video_index < 0) { segment_time = packet.pts * av_q2d(audio_st->time_base); } else { segment_time = prev_segment_time; } if (segment_time - prev_segment_time >= options.segment_duration) { av_write_trailer(oc); // close ts file and free memory avio_flush(oc->pb); avio_close(oc->pb); timestamp = av_gettime() / 1000000; snprintf(output_filename, strlen(options.workdir) + strlen(options.key) + 75, "%s/ts/%s/%d_%d_%u.ts", options.workdir, options.key, (int)timestamp, (int)options.segment_duration, output_index++); if (avio_open(&oc->pb, output_filename, AVIO_FLAG_WRITE) < 0) { fprintf(stderr, "Could not open '%s'\n", output_filename); break; } // Write a new header at the start of each file if (avformat_write_header(oc, NULL)) { fprintf(stderr, "Could not write mpegts header to first output file\n"); exit(1); } prev_segment_time = segment_time; } ret = av_interleaved_write_frame(oc, &packet); if (ret < 0) { fprintf(stderr, "Warning: Could not write frame of stream\n"); } else if (ret > 0) { fprintf(stderr, "End of stream requested\n"); av_free_packet(&packet); break; } av_free_packet(&packet); } while (1); close_context(&oc, &video_st); return 0; }
/*
 * Open the global output file (outFmtCtx/outFilename), create one video
 * stream using the compile-time ENCODER_ID, open its encoder, and write the
 * container header.
 *
 * Returns 0 on success or a negative AVERROR on failure.
 *
 * NOTE(review): inStream is always NULL when passed to add_stream() — this
 * assumes add_stream() tolerates a NULL input stream; verify its contract.
 * NOTE(review): the stream's codec time_base is assigned AFTER
 * avformat_write_header(); the muxer has likely already consumed the time
 * base by then — confirm this is intentional.
 */
int open_output_file() {
    AVStream *outStream = NULL;
    AVStream *inStream = NULL;                  /* never assigned; see NOTE above */
    AVCodecContext *decCtx = NULL, *encCtx = NULL;  /* decCtx currently unused */
    AVOutputFormat *ofmt = NULL;
    AVCodec *encoder = NULL;
    int ret;
    int streamIdx = 0;                          /* index of the single video stream */
    unsigned int i;                             /* currently unused */

    /* Allocate the output context; muxer guessed from outFilename's extension. */
    avformat_alloc_output_context2(&outFmtCtx, NULL, NULL, outFilename);
    if (!outFmtCtx) {
        av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
        return AVERROR_UNKNOWN;
    }
    ofmt = outFmtCtx->oformat;
    /* Force the muxer's default video codec to our encoder.
     * NOTE(review): this mutates the shared AVOutputFormat, affecting any
     * other context using the same muxer in this process. */
    ofmt->video_codec = ENCODER_ID;

    if (ofmt->video_codec != AV_CODEC_ID_NONE)
        outStream = add_stream(inStream, ofmt->video_codec, &encoder);

    /* Open the encoder attached to the new stream (legacy stream->codec API). */
    if (outStream) {
        encCtx = outStream->codec;
        ret = avcodec_open2(encCtx, encoder, NULL);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Could not open video codec\n");
            return ret;
        }
    }
    av_dump_format(outFmtCtx, 0, outFilename, 1);

    /* Open the output file unless the muxer needs no file (e.g. some devices). */
    if (!(outFmtCtx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&outFmtCtx->pb, outFilename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", outFilename);
            return ret;
        }
    }

    //initialize muxer, write output file header
    ret = avformat_write_header(outFmtCtx, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
        return ret;
    }

    /* Time base from file-scope globals; see NOTE about ordering above. */
    outFmtCtx->streams[streamIdx]->codec->time_base.den = time_base_den;
    outFmtCtx->streams[streamIdx]->codec->time_base.num = time_base_num;
    return 0;
}
/*
 * Transcode "input_file.wmv" (first video stream) to H.264 in an FLV
 * container ("output_file.flv", or an RTMP URL for live streaming).
 *
 * Pipeline: demux -> decode -> copy into a fresh frame -> H.264 encode ->
 * interleaved write. Uses the legacy FFmpeg API (stream->codec,
 * avcodec_alloc_frame, avpicture_*); add_stream/open_video/close_video and
 * StreamInfo are project helpers defined elsewhere.
 */
int main(int argc, char *argv[]) {
    // Decoder local variable declaration
    AVFormatContext *pFormatCtx = NULL;
    int i, videoStream;
    AVCodecContext *pCodecCtx = NULL;
    AVCodec *pCodec;
    AVFrame *pFrame;
    AVPacket packet;
    int frameFinished;
    // Encoder local variable declaration
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *video_st;
    AVCodec *video_codec;
    int ret, frame_count;
    StreamInfo sInfo;

    // Register all formats, codecs and network
    av_register_all();
    avcodec_register_all();
    avformat_network_init();

    // Open video file
    if (avformat_open_input(&pFormatCtx, "input_file.wmv", NULL, NULL) != 0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, "input_file.wmv", 0);

    // Find the first video stream
    videoStream = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    if (videoStream == -1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    // Open codec (decoder)
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
        return -1; // Could not open codec

    // Allocate video frame (reused for every decoded picture)
    pFrame = avcodec_alloc_frame();

    // Setup mux
    filename = "output_file.flv";
    // To stream to a media server (e.g. FMS)
    // filename = "rtmp://chineseforall.org/live/beta";
    fmt = av_guess_format("flv", filename, NULL);
    if (fmt == NULL) {
        printf("Could not guess format.\n");
        return -1;
    }
    // allocate the output media context
    oc = avformat_alloc_context();
    if (oc == NULL) {
        printf("could not allocate context.\n");
        return -1;
    }
    // Set output format context to the format ffmpeg guessed
    oc->oformat = fmt;

    // Add the video stream using the h.264
    // codec and initialize the codec.
    video_st = NULL;
    // 'i' still holds videoStream here (the search loop broke at it)
    sInfo.width = pFormatCtx->streams[i]->codec->width;
    sInfo.height = pFormatCtx->streams[i]->codec->height;
    sInfo.pix_fmt = AV_PIX_FMT_YUV420P;
    sInfo.frame_rate = 30;
    sInfo.bitrate = 450*1000;
    video_st = add_stream(oc, &video_codec, AV_CODEC_ID_H264, &sInfo);

    // Now that all the parameters are set, we can open the audio and
    // video codecs and allocate the necessary encode buffers.
    if (video_st)
        open_video(oc, video_codec, video_st);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename, av_err2str(ret));
            return 1;
        }
    }
    // dump output format
    av_dump_format(oc, 0, filename, 1);

    // Write the stream header, if any.
    ret = avformat_write_header(oc, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n", av_err2str(ret));
        return 1;
    }

    // Read frames, decode, and re-encode
    frame_count = 1;
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

            // Did we get a video frame?
            if (frameFinished) {
                // Initialize a new frame sized for the ENCODER's geometry
                AVFrame* newFrame = avcodec_alloc_frame();
                int size = avpicture_get_size(video_st->codec->pix_fmt,
                        video_st->codec->width, video_st->codec->height);
                uint8_t* picture_buf = av_malloc(size);
                avpicture_fill((AVPicture *) newFrame, picture_buf,
                        video_st->codec->pix_fmt, video_st->codec->width,
                        video_st->codec->height);

                // Copy only the frame content without additional fields
                // NOTE(review): assumes decoder and encoder geometry/pix_fmt
                // match — no scaling/conversion is performed here.
                av_picture_copy((AVPicture*) newFrame, (AVPicture*) pFrame,
                        video_st->codec->pix_fmt, video_st->codec->width,
                        video_st->codec->height);

                // encode the image
                AVPacket pkt;
                int got_output;
                av_init_packet(&pkt);
                pkt.data = NULL; // packet data will be allocated by the encoder
                pkt.size = 0;

                // Set the frame's pts (this prevents the warning notice
                // 'non-strictly-monotonic PTS'
                newFrame->pts = frame_count;

                ret = avcodec_encode_video2(video_st->codec, &pkt, newFrame, &got_output);
                if (ret < 0) {
                    fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
                    exit(1);
                }
                if (got_output) {
                    if (video_st->codec->coded_frame->key_frame)
                        pkt.flags |= AV_PKT_FLAG_KEY;
                    pkt.stream_index = video_st->index;
                    // Rescale timestamps from encoder to stream time base
                    if (pkt.pts != AV_NOPTS_VALUE)
                        pkt.pts = av_rescale_q(pkt.pts, video_st->codec->time_base, video_st->time_base);
                    if (pkt.dts != AV_NOPTS_VALUE)
                        pkt.dts = av_rescale_q(pkt.dts, video_st->codec->time_base, video_st->time_base);
                    // Write the compressed frame to the media file.
                    ret = av_interleaved_write_frame(oc, &pkt);
                } else {
                    ret = 0;
                }
                if (ret != 0) {
                    fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
                    exit(1);
                }

                fprintf(stderr, "encoded frame #%d\n", frame_count);
                frame_count++;

                // Free the YUV picture frame we copied from the
                // decoder to eliminate the additional fields
                // and other packets/frames used
                av_free(picture_buf);
                av_free_packet(&pkt);
                av_free(newFrame);
            }
        }
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close the video codec (encoder) */
    if (video_st)
        close_video(oc, video_st);

    // Free the output streams.
    for (i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }
    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the output format context */
    av_free(oc);

    // Free the YUV frame populated by the decoder
    av_free(pFrame);
    // Close the video codec (decoder)
    avcodec_close(pCodecCtx);
    // Close the input video file
    avformat_close_input(&pFormatCtx);

    return 0;
}
int main(int argc, char ** argv) { if(argc < 4) { printf("\nScrub, you need to specify a bitrate, number of frames, and server." "\nLike this: pixieHD 350 1000 rtmp://domain.com/live/matt\n" "\nNOTE, it is: progname bitrate frames server\n\n" "The bitrate is understood to be kbits/sec.\n" "You should enter frames or else you the program will\n" "continue to stream until you forcefully close it.\n" "THANK YOU: while(1) { /* stream! */ }\n"); return 0; } printf("\nYou have set the following options:\n\n%5cbitrate: %s," "\n%5cframes: %s\n%5cserver: %s\n\n", ' ',argv[1],' ',argv[2],' ',argv[3]); /*int p; printf("Initializing noob options"); for(p=0; p<3; ++p) { printf("%5c",'.'); Sleep(1500); } printf("\n\n"); char *input; printf("You hating on my GFX or wat? Please Answer: "); input = getline(); printf("\n\n"); printf("Your answer: "); size_t input_len = strlen(input); for(p=0; p<input_len; ++p) { Sleep(300); printf("%c",input[p]); } printf("\nkk here we go..."); Sleep(1000);*/ printf("\n\nPress the CONTROL key to begin streaming or ESC key to QUIT.\n\n"); while (1) { if (ButtonPress(VK_ESCAPE)) { printf("Quit.\n\n"); break; } else if (ButtonPress(VK_CONTROL)) { // Decoder local variable declaration AVFormatContext *pFormatCtx = NULL; int i, videoStream; AVCodecContext *pCodecCtx = NULL; AVCodec *pCodec; AVFrame *pFrame; AVPacket packet; int frameFinished; // Encoder local variable declaration const char *filename; AVOutputFormat *fmt; AVFormatContext *oc; AVStream *video_st; AVCodec *video_codec; int ret; unsigned int frame_count, frame_count2; StreamInfo sInfo; size_t max_frames = strtol(argv[2], NULL, 0); // Register all formats, codecs and network av_register_all(); avcodec_register_all(); avformat_network_init(); // Setup mux //filename = "output_file.flv"; //filename = "rtmp://chineseforall.org/live/beta"; filename = argv[3]; fmt = av_guess_format("flv", filename, NULL); if (fmt == NULL) { printf("Could not guess format.\n"); return -1; } // allocate the output 
media context oc = avformat_alloc_context(); if (oc == NULL) { printf("could not allocate context.\n"); return -1; } HDC hScreen = GetDC(GetDesktopWindow()); ScreenX = GetDeviceCaps(hScreen, HORZRES); ScreenY = GetDeviceCaps(hScreen, VERTRES); // Temp. hard-code the resolution int new_width = 1024, new_height = 576; double v_ratio = 1.7786458333333333333333333333333; // Set output format context to the format ffmpeg guessed oc->oformat = fmt; // Add the video stream using the h.264 // codec and initialize the codec. video_st = NULL; sInfo.width = new_width; sInfo.height = new_height; sInfo.pix_fmt = AV_PIX_FMT_YUV420P; sInfo.frame_rate = 10; sInfo.bitrate = strtol(argv[1], NULL, 0)*1000; video_st = add_stream(oc, &video_codec, AV_CODEC_ID_H264, &sInfo); // Now that all the parameters are set, we can open the audio and // video codecs and allocate the necessary encode buffers. if (video_st) open_video(oc, video_codec, video_st); /* open the output file, if needed */ if (!(fmt->flags & AVFMT_NOFILE)) { ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE); if (ret < 0) { fprintf(stderr, "Could not open '%s': %s\n", filename, av_err2str(ret)); return 1; } } // dump output format av_dump_format(oc, 0, filename, 1); // Write the stream header, if any. 
ret = avformat_write_header(oc, NULL); if (ret < 0) { fprintf(stderr, "Error occurred when opening output file: %s\n", av_err2str(ret)); return 1; } // Read frames, decode, and re-encode frame_count = 1; frame_count2 = 1; HDC hdcMem = CreateCompatibleDC (hScreen); HBITMAP hBitmap = CreateCompatibleBitmap(hScreen, ScreenX, ScreenY); HGDIOBJ hOld; BITMAPINFOHEADER bmi = {0}; bmi.biSize = sizeof(BITMAPINFOHEADER); bmi.biPlanes = 1; bmi.biBitCount = 32; bmi.biWidth = ScreenX; bmi.biHeight = -ScreenY; bmi.biCompression = BI_RGB; bmi.biSizeImage = 0;// 3 * ScreenX * ScreenY; if(ScreenData) free(ScreenData); ScreenData = (BYTE*)malloc(4 * ScreenX * ScreenY); AVPacket pkt; clock_t start_t = GetTickCount(); long long wait_time = 0; uint64_t total_size; while(1) { hOld = SelectObject(hdcMem, hBitmap); BitBlt(hdcMem, 0, 0, ScreenX, ScreenY, hScreen, 0, 0, SRCCOPY); SelectObject(hdcMem, hOld); GetDIBits(hdcMem, hBitmap, 0, ScreenY, ScreenData, (BITMAPINFO*)&bmi, DIB_RGB_COLORS); //calculate the bytes needed for the output image int nbytes = avpicture_get_size(AV_PIX_FMT_YUV420P, new_width, new_height); //create buffer for the output image uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes); //create ffmpeg frame structures. These do not allocate space for image data, //just the pointers and other information about the image. AVFrame* inpic = avcodec_alloc_frame(); AVFrame* outpic = avcodec_alloc_frame(); //this will set the pointers in the frame structures to the right points in //the input and output buffers. 
avpicture_fill((AVPicture*)inpic, ScreenData, AV_PIX_FMT_RGB32, ScreenX, ScreenY); avpicture_fill((AVPicture*)outpic, outbuffer, AV_PIX_FMT_YUV420P, new_width, new_height); //create the conversion context struct SwsContext *fooContext = sws_getContext(ScreenX, ScreenY, AV_PIX_FMT_RGB32, new_width, new_height, AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL); //perform the conversion sws_scale(fooContext, inpic->data, inpic->linesize, 0, ScreenY, outpic->data, outpic->linesize); // Initialize a new frame AVFrame* newFrame = avcodec_alloc_frame(); int size = avpicture_get_size(video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height); uint8_t* picture_buf = av_malloc(size); avpicture_fill((AVPicture *) newFrame, picture_buf, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height); // Copy only the frame content without additional fields av_picture_copy((AVPicture*) newFrame, (AVPicture*) outpic, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height); // encode the image int got_output; av_init_packet(&pkt); pkt.data = NULL; // packet data will be allocated by the encoder pkt.size = 0; // Set the frame's pts (this prevents the warning notice 'non-strictly-monotonic PTS') newFrame->pts = frame_count2; ret = avcodec_encode_video2(video_st->codec, &pkt, newFrame, &got_output); if (ret < 0) { fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret)); exit(1); } if (got_output) { if (video_st->codec->coded_frame->key_frame) pkt.flags |= AV_PKT_FLAG_KEY; pkt.stream_index = video_st->index; if (pkt.pts != AV_NOPTS_VALUE) pkt.pts = av_rescale_q(pkt.pts, video_st->codec->time_base, video_st->time_base); if (pkt.dts != AV_NOPTS_VALUE) pkt.dts = av_rescale_q(pkt.dts, video_st->codec->time_base, video_st->time_base); // Write the compressed frame to the media file. 
ret = av_interleaved_write_frame(oc, &pkt); fprintf(stderr, "encoded frame #%d\n", frame_count); frame_count++; } else { ret = 0; } if (ret != 0) { fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret)); exit(1); } ++frame_count2; // Free the YUV picture frame we copied from the // decoder to eliminate the additional fields // and other packets/frames used av_free(picture_buf); av_free_packet(&pkt); av_free(newFrame); //free memory av_free(outbuffer); av_free(inpic); av_free(outpic); if(frame_count == max_frames) { /* Write the trailer, if any. The trailer must be written before you * close the CodecContexts open when you wrote the header; otherwise * av_write_trailer() may try to use memory that was freed on * av_codec_close(). */ av_write_trailer(oc); /* Close the video codec (encoder) */ if (video_st) { close_video(oc, video_st); } // Free the output streams. for (i = 0; i < oc->nb_streams; i++) { av_freep(&oc->streams[i]->codec); av_freep(&oc->streams[i]); } if (!(fmt->flags & AVFMT_NOFILE)) { /* Close the output file. */ avio_close(oc->pb); } /* free the output format context */ av_free(oc); ReleaseDC(GetDesktopWindow(),hScreen); DeleteDC(hdcMem); printf("\n\nPress the CONTROL key to begin streaming or ESC key to QUIT.\n\n"); break; } } } } return 0; }
/*
 * Open the video encoder for the previously configured output stream and
 * allocate the frame buffers needed for encoding.
 *
 * Uses legacy libav APIs (avcodec_open, avcodec_alloc_frame, avpicture_*,
 * av_write_header) and member state set up earlier:
 *   ost  - output AVStream (may be NULL if no stream was created)
 *   ofc  - output AVFormatContext, of - its AVOutputFormat
 *   pf   - pixel format frames are supplied in
 *   filename, opicture, tmp_opicture, video_outbuf(_size) - members.
 * Fatal errors abort via Panic()/Fatal(); there is no error return.
 */
void VideoStream::OpenStream() { /* now that all the parameters are set, we can open the video codecs and allocate the necessary encode buffers */ if ( ost ) {
/* ost->codec is a pointer or an embedded struct depending on ffmpeg vintage */
#if ZM_FFMPEG_SVN
 AVCodecContext *c = ost->codec;
#else
 AVCodecContext *c = &ost->codec;
#endif
 /* find the video encoder */ AVCodec *codec = avcodec_find_encoder(c->codec_id); if ( !codec ) { Panic( "codec not found" ); } /* open the codec */ if ( avcodec_open(c, codec) < 0 ) { Panic( "Could not open codec" ); } /* allocate the encoded raw picture */ opicture = avcodec_alloc_frame(); if ( !opicture ) { Panic( "Could not allocate opicture" ); } int size = avpicture_get_size( c->pix_fmt, c->width, c->height); uint8_t *opicture_buf = (uint8_t *)malloc(size); if ( !opicture_buf ) { av_free(opicture); Panic( "Could not allocate opicture" ); } avpicture_fill( (AVPicture *)opicture, opicture_buf, c->pix_fmt, c->width, c->height ); /* if the output format is not RGB24, then a temporary RGB24 picture is needed too. It is then converted to the required output format */ tmp_opicture = NULL; if ( c->pix_fmt != pf ) { tmp_opicture = avcodec_alloc_frame(); if ( !tmp_opicture ) { Panic( "Could not allocate temporary opicture" ); } /* note: this inner 'size' intentionally shadows the outer one; it is the
  * size of the temporary picture in the *input* pixel format pf */ int size = avpicture_get_size( pf, c->width, c->height); uint8_t *tmp_opicture_buf = (uint8_t *)malloc(size); if (!tmp_opicture_buf) { av_free( tmp_opicture ); Panic( "Could not allocate temporary opicture" ); } avpicture_fill( (AVPicture *)tmp_opicture, tmp_opicture_buf, pf, c->width, c->height ); } } /* open the output file, if needed (formats with AVFMT_NOFILE manage their own I/O) */ if ( !(of->flags & AVFMT_NOFILE) ) {
/* avio_open replaced url_fopen in libavutil >= 51.2.1 */
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
 if ( avio_open(&ofc->pb, filename, URL_WRONLY) < 0 )
#else
 if ( url_fopen(&ofc->pb, filename, URL_WRONLY) < 0 )
#endif
 { Fatal( "Could not open '%s'", filename ); } } video_outbuf = NULL; if ( !(ofc->oformat->flags & AVFMT_RAWPICTURE) ) { /* allocate output buffer */ /* XXX: API change will be done */
 /* 200000 bytes is an arbitrary upper bound for one encoded frame */ video_outbuf_size = 200000; video_outbuf = (uint8_t *)malloc(video_outbuf_size); } /* write the stream header, if any */ av_write_header(ofc); }
static int read_packet(AVFormatContext *s1, AVPacket *pkt) { VideoData *s = s1->priv_data; char filename[1024]; int i; int size[3] = {0}, ret[3] = {0}; AVIOContext *f[3]; AVCodecContext *codec = s1->streams[0]->codec; if (!s->is_pipe) { /* loop over input */ if (s1->loop_input && s->img_number > s->img_last) { s->img_number = s->img_first; } if (s->img_number > s->img_last) return AVERROR_EOF; if (av_get_frame_filename(filename, sizeof(filename), s->path, s->img_number) < 0 && s->img_number > 1) return AVERROR(EIO); for(i = 0; i < 3; i++) { if (avio_open(&f[i], filename, AVIO_RDONLY) < 0) { if(i == 1) break; av_log(s1, AV_LOG_ERROR, "Could not open file : %s\n", filename); return AVERROR(EIO); } size[i] = avio_size(f[i]); if(!s->split_planes) break; filename[ strlen(filename) - 1 ] = 'U' + i; } if(codec->codec_id == CODEC_ID_RAWVIDEO && !codec->width) infer_size(&codec->width, &codec->height, size[0]); } else { f[0] = s1->pb; if (url_feof(f[0])) return AVERROR(EIO); size[0] = 4096; } av_new_packet(pkt, size[0] + size[1] + size[2]); pkt->stream_index = 0; pkt->flags |= AV_PKT_FLAG_KEY; pkt->size = 0; for(i = 0; i < 3; i++) { if(size[i]) { ret[i] = avio_read(f[i], pkt->data + pkt->size, size[i]); if (!s->is_pipe) avio_close(f[i]); if(ret[i] > 0) pkt->size += ret[i]; } } if (ret[0] <= 0 || ret[1] < 0 || ret[2] < 0) { av_free_packet(pkt); return AVERROR(EIO); /* signal EOF */ } else { s->img_count++; s->img_number++; return 0; } }
static int open_f(stream_t *stream, int mode, void *opts, int *file_format) { int flags = 0; const char *filename; AVIOContext *ctx = NULL; int res = STREAM_ERROR; int64_t size; int dummy; init_avformat(); if (mode == STREAM_READ) flags = AVIO_FLAG_READ; else if (mode == STREAM_WRITE) flags = AVIO_FLAG_WRITE; else { mp_msg(MSGT_OPEN, MSGL_ERR, "[ffmpeg] Unknown open mode %d\n", mode); res = STREAM_UNSUPPORTED; goto out; } #ifdef AVIO_FLAG_DIRECT flags |= AVIO_FLAG_DIRECT; #else mp_msg(MSGT_OPEN, MSGL_WARN, "[ffmpeg] No support for AVIO_FLAG_DIRECT, might cause performance and other issues.\n" "Please update to and rebuild against an FFmpeg version supporting it.\n"); #endif if (stream->url) filename = stream->url; else { mp_msg(MSGT_OPEN, MSGL_ERR, "[ffmpeg] No URL\n"); goto out; } if (!strncmp(filename, prefix, strlen(prefix))) filename += strlen(prefix); dummy = !strncmp(filename, "rtsp:", 5) || !strncmp(filename, "dummy:", 6); mp_msg(MSGT_OPEN, MSGL_V, "[ffmpeg] Opening %s\n", filename); if (!dummy && avio_open(&ctx, filename, flags) < 0) goto out; stream->priv = ctx; size = dummy ? 0 : avio_size(ctx); if (size >= 0) stream->end_pos = size; stream->type = STREAMTYPE_FILE; stream->seek = seek; if (dummy || !ctx->seekable) { stream->type = STREAMTYPE_STREAM; stream->seek = NULL; } if (dummy) { *file_format = DEMUXER_TYPE_LAVF; } else { stream->fill_buffer = fill_buffer; stream->write_buffer = write_buffer; stream->control = control; stream->close = close_f; } res = STREAM_OK; out: return res; }
int encode_lavc_start(struct encode_lavc_context *ctx) { AVDictionaryEntry *de; if (ctx->header_written < 0) return 0; if (ctx->header_written > 0) return 1; CHECK_FAIL(ctx, 0); if (ctx->expect_video && ctx->vcc == NULL) { if (ctx->avc->oformat->video_codec != AV_CODEC_ID_NONE || ctx->options->vcodec) { encode_lavc_fail(ctx, "no video stream succeeded - invalid codec?\n"); return 0; } } if (ctx->expect_audio && ctx->acc == NULL) { if (ctx->avc->oformat->audio_codec != AV_CODEC_ID_NONE || ctx->options->acodec) { encode_lavc_fail(ctx, "no audio stream succeeded - invalid codec?\n"); return 0; } } ctx->header_written = -1; if (!(ctx->avc->oformat->flags & AVFMT_NOFILE)) { MP_INFO(ctx, "Opening output file: %s\n", ctx->avc->filename); if (avio_open(&ctx->avc->pb, ctx->avc->filename, AVIO_FLAG_WRITE) < 0) { encode_lavc_fail(ctx, "could not open '%s'\n", ctx->avc->filename); return 0; } } ctx->t0 = mp_time_sec(); MP_INFO(ctx, "Opening muxer: %s [%s]\n", ctx->avc->oformat->long_name, ctx->avc->oformat->name); if (ctx->metadata) { for (int i = 0; i < ctx->metadata->num_keys; i++) { av_dict_set(&ctx->avc->metadata, ctx->metadata->keys[i], ctx->metadata->values[i], 0); } } if (avformat_write_header(ctx->avc, &ctx->foptions) < 0) { encode_lavc_fail(ctx, "could not write header\n"); return 0; } for (de = NULL; (de = av_dict_get(ctx->foptions, "", de, AV_DICT_IGNORE_SUFFIX));) MP_WARN(ctx, "ofopts: key '%s' not found.\n", de->key); av_dict_free(&ctx->foptions); ctx->header_written = 1; return 1; }
AVWRAP_DECL int AVWrapper_Init( void (*pAddFileLogRaw)(const char*), const char* pFilename, const char* pDesc, const char* pSoundFile, const char* pFormatName, const char* pVCodecName, const char* pACodecName, int Width, int Height, int FramerateNum, int FramerateDen, int VQuality) { int ret; AddFileLogRaw = pAddFileLogRaw; av_log_set_callback( &LogCallback ); g_Width = Width; g_Height = Height; g_Framerate.num = FramerateNum; g_Framerate.den = FramerateDen; g_VQuality = VQuality; // initialize libav and register all codecs and formats av_register_all(); // find format g_pFormat = av_guess_format(pFormatName, NULL, NULL); if (!g_pFormat) return FatalError("Format \"%s\" was not found", pFormatName); // allocate the output media context g_pContainer = avformat_alloc_context(); if (!g_pContainer) return FatalError("Could not allocate output context"); g_pContainer->oformat = g_pFormat; // store description of file av_dict_set(&g_pContainer->metadata, "comment", pDesc, 0); // append extesnion to filename char ext[16]; strncpy(ext, g_pFormat->extensions, 16); ext[15] = 0; ext[strcspn(ext,",")] = 0; snprintf(g_pContainer->filename, sizeof(g_pContainer->filename), "%s.%s", pFilename, ext); // find codecs g_pVCodec = avcodec_find_encoder_by_name(pVCodecName); g_pACodec = avcodec_find_encoder_by_name(pACodecName); // add audio and video stream to container g_pVStream = NULL; g_pAStream = NULL; if (g_pVCodec) { ret = AddVideoStream(); if (ret < 0) return ret; } else Log("Video codec \"%s\" was not found; video will be ignored.\n", pVCodecName); if (g_pACodec) { g_pSoundFile = fopen(pSoundFile, "rb"); if (g_pSoundFile) { fread(&g_Frequency, 4, 1, g_pSoundFile); fread(&g_Channels, 4, 1, g_pSoundFile); AddAudioStream(); } else Log("Could not open %s\n", pSoundFile); } else Log("Audio codec \"%s\" was not found; audio will be ignored.\n", pACodecName); if (!g_pAStream && !g_pVStream) return FatalError("No video, no audio, aborting..."); // write format info to log 
av_dump_format(g_pContainer, 0, g_pContainer->filename, 1); // open the output file, if needed if (!(g_pFormat->flags & AVFMT_NOFILE)) { if (avio_open(&g_pContainer->pb, g_pContainer->filename, AVIO_FLAG_WRITE) < 0) return FatalError("Could not open output file (%s)", g_pContainer->filename); } g_pVFrame->pts = -1; // write the stream header, if any return avformat_write_header(g_pContainer, NULL); }
/*
 * Remux a raw H.264 elementary stream and an AAC file into one MP4.
 *
 * Reads packets from two fixed input paths, fabricates PTS/DTS for the
 * raw H.264 input (which has none), interleaves by comparing the running
 * timestamps of both inputs, rescales timestamps to the output time base
 * and writes the result. Optional bitstream filters (USE_H264BSF /
 * USE_AACBSF) convert between Annex-B and MP4 packaging.
 *
 * noUse is ignored. Returns 0 on success, -1 on error.
 */
int muxer_mp4(void* noUse)
{
    AVOutputFormat *ofmt = NULL;
    //Input AVFormatContext and Output AVFormatContext
    AVFormatContext *ifmt_ctx_v = NULL, *ifmt_ctx_a = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    int ret, i;
    int videoindex_v = -1, videoindex_out = -1;
    int audioindex_a = -1, audioindex_out = -1;
    int frame_index = 0;
    int64_t cur_pts_v = 0, cur_pts_a = 0;

    //const char *in_filename_v = "cuc_ieschool.ts";//Input file URL
    const char *in_filename_v = "../testResource/bigbuckbunny_480x272.h264";
    //const char *in_filename_a = "cuc_ieschool.mp3";
    //const char *in_filename_a = "gowest.m4a";
    //const char *in_filename_a = "gowest.aac";
    const char *in_filename_a = "../testResource/WavinFlag.aac";
    const char *out_filename = "bigbuckbunny.mp4";//Output file URL

    av_register_all();

    //Input
    if ((ret = avformat_open_input(&ifmt_ctx_v, in_filename_v, 0, 0)) < 0) { printf("Could not open input file."); goto end; }
    if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0) { printf("Failed to retrieve input stream information"); goto end; }
    if ((ret = avformat_open_input(&ifmt_ctx_a, in_filename_a, 0, 0)) < 0) { printf("Could not open input file."); goto end; }
    if ((ret = avformat_find_stream_info(ifmt_ctx_a, 0)) < 0) { printf("Failed to retrieve input stream information"); goto end; }
    printf("===========Input Information==========\n");
    av_dump_format(ifmt_ctx_v, 0, in_filename_v, 0);
    av_dump_format(ifmt_ctx_a, 0, in_filename_a, 0);
    printf("======================================\n");

    //Output
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) { printf("Could not create output context\n"); ret = AVERROR_UNKNOWN; goto end; }
    ofmt = ofmt_ctx->oformat;

    /* Custom write callback for the output.
     * NOTE(review): outbuffer/avio_out are never freed on any path below
     * and 'end:' does not release them — leak. */
    unsigned char* outbuffer = NULL;
    outbuffer = (unsigned char*)av_malloc(32768);
    AVIOContext *avio_out = avio_alloc_context(outbuffer, 32768, 0, NULL, NULL, write_buffer, NULL);
    if (avio_out == NULL) goto end;
    ofmt_ctx->pb = avio_out;
    /* NOTE(review): '=' clobbers any flags already set on the context;
     * '|=' is presumably what was intended — confirm before changing. */
    ofmt_ctx->flags = AVFMT_FLAG_CUSTOM_IO;

    //Create output AVStream according to input AVStream (video)
    for (i = 0; i < ifmt_ctx_v->nb_streams; i++) {
        if (ifmt_ctx_v->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            AVStream *in_stream = ifmt_ctx_v->streams[i];
            AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
            videoindex_v = i;
            if (!out_stream) { printf("Failed allocating output stream\n"); ret = AVERROR_UNKNOWN; goto end; }
            videoindex_out = out_stream->index;
            //Copy the settings of AVCodecContext
            if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
                printf("Failed to copy context from input to output stream codec context\n");
                goto end;
            }
            out_stream->codec->codec_tag = 0;
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
            break;
        }
    }
    //Create output AVStream according to input AVStream (audio)
    for (i = 0; i < ifmt_ctx_a->nb_streams; i++) {
        if (ifmt_ctx_a->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            AVStream *in_stream = ifmt_ctx_a->streams[i];
            AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
            audioindex_a = i;
            if (!out_stream) { printf("Failed allocating output stream\n"); ret = AVERROR_UNKNOWN; goto end; }
            audioindex_out = out_stream->index;
            //Copy the settings of AVCodecContext
            if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
                printf("Failed to copy context from input to output stream codec context\n");
                goto end;
            }
            out_stream->codec->codec_tag = 0;
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
            break;
        }
    }

    printf("==========Output Information==========\n");
    av_dump_format(ofmt_ctx, 0, out_filename, 1);
    printf("======================================\n");

    //Open output file
    /* NOTE(review): this avio_open overwrites the custom avio_out context
     * installed above (leaking it) and defeats the custom-IO setup; one of
     * the two output paths should be removed. */
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE) < 0) {
            printf("Could not open output file '%s'", out_filename);
            goto end;
        }
    }
    //Write file header
    if (avformat_write_header(ofmt_ctx, NULL) < 0) {
        printf("Error occurred when opening output file\n");
        goto end;
    }

    //FIX
#if USE_H264BSF
    AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif
#if USE_AACBSF
    AVBitStreamFilterContext* aacbsfc = av_bitstream_filter_init("aac_adtstoasc");
#endif

    while (1) {
        AVFormatContext *ifmt_ctx;
        int stream_index = 0;
        AVStream *in_stream, *out_stream;

        //Get an AVPacket: pick whichever input is behind in output time
        if (av_compare_ts(cur_pts_v, ifmt_ctx_v->streams[videoindex_v]->time_base,
                          cur_pts_a, ifmt_ctx_a->streams[audioindex_a]->time_base) <= 0) {
            ifmt_ctx = ifmt_ctx_v;
            stream_index = videoindex_out;
            if (av_read_frame(ifmt_ctx, &pkt) >= 0) {
                do {
                    in_stream = ifmt_ctx->streams[pkt.stream_index];
                    out_stream = ofmt_ctx->streams[stream_index];
                    if (pkt.stream_index == videoindex_v) {
                        // FIX: No PTS (Example: Raw H.264)
                        // Simple Write PTS
                        if (pkt.pts == AV_NOPTS_VALUE) {
                            //Write PTS
                            AVRational time_base1 = in_stream->time_base;
                            //Duration between 2 frames (us)
                            int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
                            //Parameters
                            pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            pkt.dts = pkt.pts;
                            pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            frame_index++;
                        }
                        cur_pts_v = pkt.pts;
                        break;
                    }
                } while (av_read_frame(ifmt_ctx, &pkt) >= 0);
            } else {
                break; /* video input exhausted -> stop muxing */
            }
        } else {
            ifmt_ctx = ifmt_ctx_a;
            stream_index = audioindex_out;
            if (av_read_frame(ifmt_ctx, &pkt) >= 0) {
                do {
                    in_stream = ifmt_ctx->streams[pkt.stream_index];
                    out_stream = ofmt_ctx->streams[stream_index];
                    if (pkt.stream_index == audioindex_a) {
                        // FIX: No PTS
                        //Simple Write PTS
                        if (pkt.pts == AV_NOPTS_VALUE) {
                            //Write PTS
                            AVRational time_base1 = in_stream->time_base;
                            //Duration between 2 frames (us)
                            int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
                            //Parameters
                            pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            pkt.dts = pkt.pts;
                            pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            frame_index++;
                        }
                        cur_pts_a = pkt.pts;
                        break;
                    }
                } while (av_read_frame(ifmt_ctx, &pkt) >= 0);
            } else {
                break; /* audio input exhausted -> stop muxing */
            }
        }

        //FIX: Bitstream Filter
#if USE_H264BSF
        av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
#if USE_AACBSF
        av_bitstream_filter_filter(aacbsfc, out_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif

        //Convert PTS/DTS from the input to the output stream time base
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        pkt.stream_index = stream_index;

        printf("Write 1 Packet. size:%5d\tpts:%lld\n", pkt.size, pkt.pts);
        //Write
        if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
            printf("Error muxing packet\n");
            break;
        }
        av_free_packet(&pkt);
    }

    //Write file trailer
    av_write_trailer(ofmt_ctx);

#if USE_H264BSF
    av_bitstream_filter_close(h264bsfc);
#endif
#if USE_AACBSF
    av_bitstream_filter_close(aacbsfc);
#endif

end:
    avformat_close_input(&ifmt_ctx_v);
    avformat_close_input(&ifmt_ctx_a);
    /* close output
     * NOTE(review): with AVFMT_FLAG_CUSTOM_IO the pb buffer came from
     * avio_alloc_context and should be freed with av_free, not avio_close
     * — confirm which output path (custom IO vs avio_open) is kept. */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    if (ret < 0 && ret != AVERROR_EOF) {
        printf("Error occurred.\n");
        return -1;
    }
    return 0;
}
//----------------------------------------------------------------
// main_utf8
//
// HLS segmenter entry point: read an MPEG-TS/MP4 input, remux it into
// numbered "<prefix>-<n>.ts" segments of roughly -d seconds each, and
// maintain an m3u8 playlist (-x). Options:
//   -i input, -o output prefix, -d target segment duration,
//   -x playlist, -p http prefix, -w live window size, -P pid file,
//   -k save keyframe offsets to per-segment .idx JSON files,
//   -s starting segment index, -m exit when the given PID exits,
//   --watch-for-kill-file, --strict-segment-duration,
//   --avformat-option KEY VALUE, --loglevel LEVEL.
// Returns 0 on success, 1 on error.
//
int main_utf8(int argc, char **argv)
{
    const char *input = NULL;
    const char *output_prefix = "";
    double target_segment_duration = 0.0;
    char *segment_duration_check = NULL;
    const char *playlist_filename = NULL;
    const char *http_prefix = "";
    long max_tsfiles = 0;
    char *max_tsfiles_check = NULL;
    double prev_segment_time = 0.0;
    double segment_duration = 0.0;
    unsigned int output_index = 1;
    const AVClass *fc = avformat_get_class();
    AVDictionary *format_opts = NULL;
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ic = NULL;
    AVFormatContext *oc = NULL;
    AVStream *video_st = NULL;
    AVStream *audio_st = NULL;
    AVCodec *codec = NULL;
    char *output_filename = NULL;
    int if_save_keyframe = 0;            //add by wanggm
    char *keyframeinfo_filename = NULL;  //add by wanggm
    json_object *obj = NULL;             //add by wanggm
    json_object *info_arr_obj = NULL;    //add by wanggm
    int if_monitor_related_process = 0;  //add by wanggm
    pid_t relatedProcessPid = 1;         //add by wanggm
    char *pid_filename = NULL;
    int video_index = -1;
    int audio_index = -1;
    int kill_file = 0;
    int decode_done = 0;
    int ret = 0;
    int i = 0;
    TSMStreamLace * streamLace = NULL;
    TSMPlaylist * playlist = NULL;
    const double segment_duration_error_tolerance = 0.05;
    double extra_duration_needed = 0;
    int strict_segment_duration = 0;

    av_log_set_level(AV_LOG_INFO);

    /* ---- command-line parsing ---- */
    for (i = 1; i < argc; i++) {
        if (strcmp(argv[i], "-i") == 0) {
            if ((argc - i) <= 1) usage(argv, "could not parse -i parameter");
            i++;
            input = argv[i];
        } else if (strcmp(argv[i], "-o") == 0) {
            if ((argc - i) <= 1) usage(argv, "could not parse -o parameter");
            i++;
            output_prefix = argv[i];
        } else if (strcmp(argv[i], "-d") == 0) {
            if ((argc - i) <= 1) usage(argv, "could not parse -d parameter");
            i++;
            target_segment_duration = strtod(argv[i], &segment_duration_check);
            if (segment_duration_check == argv[i] || target_segment_duration == HUGE_VAL || target_segment_duration == -HUGE_VAL){
                usage3(argv, "invalid segment duration: ", argv[i]);
            }
        } else if (strcmp(argv[i], "-x") == 0) {
            if ((argc - i) <= 1) usage(argv, "could not parse -x parameter");
            i++;
            playlist_filename = argv[i];
        } else if (strcmp(argv[i], "-p") == 0) {
            if ((argc - i) <= 1) usage(argv, "could not parse -p parameter");
            i++;
            http_prefix = argv[i];
        } else if (strcmp(argv[i], "-w") == 0) {
            if ((argc - i) <= 1) usage(argv, "could not parse -w parameter");
            i++;
            max_tsfiles = strtol(argv[i], &max_tsfiles_check, 10);
            if (max_tsfiles_check == argv[i] || max_tsfiles < 0 || max_tsfiles >= INT_MAX) {
                usage3(argv, "invalid live stream max window size: ", argv[i]);
            }
        } else if (strcmp(argv[i], "-P") == 0) {
            if ((argc - i) <= 1) usage(argv, "could not parse -P parameter");
            i++;
            pid_filename = argv[i];
        } else if (strcmp(argv[i], "--watch-for-kill-file") == 0) {
            // end program when it finds a file with name 'kill':
            kill_file = 1;
        } else if (strcmp(argv[i], "--strict-segment-duration") == 0) {
            // force segment creation on non-keyframe boundaries:
            strict_segment_duration = 1;
        } else if (strcmp(argv[i], "--avformat-option") == 0) {
            const AVOption *of;
            const char *opt;
            const char *arg;
            if ((argc - i) <= 1) usage(argv, "could not parse --avformat-option parameter");
            i++;
            opt = argv[i];
            if ((argc - i) <= 1) usage(argv, "could not parse --avformat-option parameter");
            i++;
            arg = argv[i];
            /* validate the option against libavformat's option table */
            if ((of = av_opt_find(&fc, opt, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)))
                av_dict_set(&format_opts, opt, arg, (of->type == AV_OPT_TYPE_FLAGS) ? AV_DICT_APPEND : 0);
            else
                usage3(argv, "unknown --avformat-option parameter: ", opt);
        } else if (strcmp(argv[i], "--loglevel") == 0) {
            const char *arg;
            if ((argc - i) <= 1) usage(argv, "could not parse --loglevel parameter");
            i++;
            arg = argv[i];
            if (loglevel(arg)) usage3(argv, "unknown --loglevel parameter: ", arg);
        } else if (strcmp(argv[i], "-k") == 0) {
            //add by wanggm for save key frame information into json file.
            if ((argc - i) <= 1) usage(argv, "could not parse -k parameter");
            i++;
            if_save_keyframe = atoi(argv[i]);
        } else if( strcmp(argv[i], "-s") == 0) {
            //add by wanggm for set the start index of ts file.
            if ( (argc -i ) <= 1) usage(argv, "could not parse -s parmeter");
            i++;
            char *output_index_check = NULL;
            output_index = strtol(argv[i], &output_index_check, 10);
            /* NOTE(review): output_index is unsigned, so 'output_index < 0'
             * can never be true — dead check. */
            if ( output_index_check== argv[i] || output_index < 0 || output_index >= INT_MAX) {
                usage3(argv, "invalid start index of ts file: ", argv[i]);
            }
        } else if( strcmp(argv[i], "-m") == 0) {
            // add by wanggm for exit by monitor the process of which pid is given.
            if ((argc - i) <= 1) usage(argv, "could not parse -m parmeter");
            i++;
            if_monitor_related_process = 1;
            unsigned int tmpPid= atoi(argv[i]);
            if( tmpPid > 0) {
                relatedProcessPid = (pid_t) tmpPid;
                fprintf(stdout, "%s I will exit when the process PID= %d exit.\n", getSystemTime(timeChar), relatedProcessPid);
            }
        }
    }

    /* ---- required-argument validation ---- */
    if (!input) { usage(argv, "-i input file parameter must be specified"); }
    if (!playlist_filename) { usage(argv, "-x m3u8 playlist file parameter must be specified"); }
    if (target_segment_duration == 0.0) { usage(argv, "-d segment duration parameter must be specified"); }
    if( output_index <= 0 ) { output_index = 1; }

    if( 1 == if_monitor_related_process) {
        pthread_t id;
        /* NOTE(review): the pid is smuggled to the thread by casting the
         * integer value to the void* argument — assumes monitor_process
         * casts it back the same way; verify. */
        pthread_create(&id, NULL, (void*)monitor_process, relatedProcessPid);
    }

    // Create PID file
    if (pid_filename) {
        FILE* pid_file = fopen_utf8(pid_filename, "wb");
        if (pid_file) {
            fprintf(pid_file, "%d", getpid());
            fclose(pid_file);
        }
    }

    av_register_all();
    avformat_network_init();

    if (!strcmp(input, "-")) { input = "pipe:"; }

    /* +15 leaves room for "-<index>.ts"/".idx" suffixes */
    output_filename = (char*) malloc( sizeof(char) * (strlen(output_prefix) + 15));
    //add by wanggm
    if( if_save_keyframe == 1) {
        keyframeinfo_filename = (char*) malloc( sizeof(char)* (strlen(output_prefix) + 15));
    }
    if (!output_filename || (1 == if_save_keyframe && !keyframeinfo_filename)) {
        fprintf(stderr, "%s Could not allocate space for output filenames\n", getSystemTime( timeChar));
        goto error;
    }

    playlist = createPlaylist(max_tsfiles, target_segment_duration, http_prefix);
    if (!playlist) {
        fprintf(stderr, "%s Could not allocate space for m3u8 playlist structure\n", getSystemTime( timeChar));
        goto error;
    }

    /* ---- open and inspect the input ---- */
    ret = avformat_open_input(&ic, input, NULL, (format_opts) ? &format_opts : NULL);
    if (ret != 0) {
        fprintf(stderr, "%sCould not open input file, make sure it is an mpegts or mp4 file: %d\n", getSystemTime(timeChar), ret);
        goto error;
    }
    av_dict_free(&format_opts);

    if (avformat_find_stream_info(ic, NULL) < 0) {
        fprintf(stderr, "%s Could not read stream information\n", getSystemTime( timeChar));
        goto error;
    }

#if LIBAVFORMAT_VERSION_MAJOR > 52 || (LIBAVFORMAT_VERSION_MAJOR == 52 && \
    LIBAVFORMAT_VERSION_MINOR >= 45)
    ofmt = av_guess_format("mpegts", NULL, NULL);
#else
    ofmt = guess_format("mpegts", NULL, NULL);
#endif
    if (!ofmt) {
        fprintf(stderr, "%s Could not find MPEG-TS muxer\n", getSystemTime( timeChar));
        goto error;
    }

    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "%s Could not allocated output context\n", getSystemTime( timeChar));
        goto error;
    }
    oc->oformat = ofmt;

    /* map the first video and first audio stream into the output */
    video_index = -1;
    audio_index = -1;
    for (i = 0; i < ic->nb_streams && (video_index < 0 || audio_index < 0); i++) {
        switch (ic->streams[i]->codec->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            video_index = i;
            ic->streams[i]->discard = AVDISCARD_NONE;
            video_st = add_output_stream(oc, ic->streams[i]);
            break;
        case AVMEDIA_TYPE_AUDIO:
            audio_index = i;
            ic->streams[i]->discard = AVDISCARD_NONE;
            audio_st = add_output_stream(oc, ic->streams[i]);
            break;
        default:
            ic->streams[i]->discard = AVDISCARD_ALL;
            break;
        }
    }

    av_dump_format(oc, 0, output_prefix, 1);

    /* a decoder is only needed to recognize keyframes; failure is non-fatal */
    if (video_index >= 0) {
        codec = avcodec_find_decoder(video_st->codec->codec_id);
        if (!codec) {
            fprintf(stderr, "%s Could not find video decoder, key frames will not be honored\n", getSystemTime( timeChar));
        }
        if (avcodec_open2(video_st->codec, codec, NULL) < 0) {
            fprintf(stderr, "%s Could not open video decoder, key frames will not be honored\n", getSystemTime( timeChar));
        }
    }

    /* ---- open the first segment and write the TS header ---- */
    snprintf(output_filename, strlen(output_prefix) + 15, "%s-%u.ts", output_prefix, output_index);
    if( 1 == if_save_keyframe) {
        snprintf(keyframeinfo_filename, strlen(output_prefix) + 15, "%s-%u.idx", output_prefix, output_index);
        obj = json_object_new_object();
        info_arr_obj = create_json_header(obj);
    }
    if (avio_open(&oc->pb, output_filename, AVIO_FLAG_WRITE) < 0) {
        fprintf(stderr, "%s Could not open '%s'\n", getSystemTime( timeChar),output_filename);
        goto error;
    }
    if (avformat_write_header(oc, NULL)) {
        fprintf(stderr, "%s Could not write mpegts header to first output file\n", getSystemTime( timeChar));
        goto error;
    }

    prev_segment_time = (double) (ic->start_time) / (double) (AV_TIME_BASE);

    /* packet reorder buffer across all input streams */
    streamLace = createStreamLace(ic->nb_streams);

    // add by houmr
    int continue_error_cnt = 0;
    int tscnt = 0;        /* packets written to the current segment */
    int audiopktcnt = 0;
    int videopktcnt = 0;
    int kfcnt = 0;
    int errpktcnt = 0;    /* consecutive packets from unmapped streams */

    /* ---- main loop: read, reorder, detect segment boundaries, write ---- */
    do {
        double segment_time = 0.0;
        AVPacket packet;
        double packetStartTime = 0.0;
        double packetDuration = 0.0;

        if (!decode_done) {
            /* av_read_frame returns 0 on success -> decode_done stays 0 */
            decode_done = av_read_frame(ic, &packet);
            if (!decode_done) {
                if (packet.stream_index != video_index && packet.stream_index != audio_index) {
                    /* give up after 10 consecutive foreign packets */
                    if( ++errpktcnt >= 10) {
                        decode_done = 1;
                    }
                    fprintf(stderr, "%s packet is not video or audio, packet.stream_index=%d\n", getSystemTime( timeChar), packet.stream_index);
                    av_free_packet(&packet);
                    continue;
                }
                errpktcnt = 0;

                /* packet time in seconds, from the stream's time base */
                double timeStamp = (double) (packet.pts) * (double) (ic->streams[packet.stream_index]->time_base.num) / (double) (ic->streams[packet.stream_index]->time_base.den);

                /* own the payload before queueing it */
                if (av_dup_packet(&packet) < 0) {
                    fprintf(stderr, "%s Could not duplicate packet\n" ,getSystemTime( timeChar));
                    av_free_packet(&packet);
                    break;
                }

                insertPacket(streamLace, &packet, timeStamp);
            }
        }

        /* allow the queue to fill up so that the packets can be sorted properly */
        if (countPackets(streamLace) < 50 && !decode_done) {
            continue;
        }

        if (!removePacket(streamLace, &packet)) {
            fprintf(stdout, "%s get packet failed!!\n", getSystemTime( timeChar));
            if (decode_done) {
                /* the queue is empty, we are done */
                break;
            }
            assert(decode_done);
            continue;
        }

        packetStartTime = (double) (packet.pts) * (double) (ic->streams[packet.stream_index]->time_base.num) / (double) (ic->streams[packet.stream_index]->time_base.den);
        packetDuration = (double) (packet.duration) * (double) (ic->streams[packet.stream_index]->time_base.num) / (double) (ic->streams[packet.stream_index]->time_base.den);

#if !defined(NDEBUG) && (defined(DEBUG) || defined(_DEBUG))
        if (av_log_get_level() >= AV_LOG_VERBOSE)
            fprintf(stderr, "%s stream %i, packet [%f, %f)\n", getSystemTime( timeChar), packet.stream_index, packetStartTime, packetStartTime + packetDuration);
#endif

        segment_duration = packetStartTime + packetDuration - prev_segment_time;

        // NOTE: segments are supposed to start on a keyframe.
        // If the keyframe interval and segment duration do not match
        // forcing the segment creation for "better seeking behavior"
        // will result in decoding artifacts after seeking or stream switching.
        if (packet.stream_index == video_index && (packet.flags & AV_PKT_FLAG_KEY || strict_segment_duration)) {
            /* video keyframe (or strict mode): allowed cut point */
            segment_time = packetStartTime;
        } else if (video_index < 0) {
            /* audio-only input: any packet is a cut point */
            segment_time = packetStartTime;
        } else {
            /* non-keyframe video or audio: keep the current segment */
            segment_time = prev_segment_time;
        }

        /* ---- rotate to a new segment when the target duration is reached ---- */
        if (segment_time - prev_segment_time + segment_duration_error_tolerance >
            target_segment_duration + extra_duration_needed) {
            fprintf(stdout, "%s segment_time=%lf prev_segment_time=%lf > target_segment_duration=%lf extra_duration_needed=%lf\n", getSystemTime( timeChar), segment_time, prev_segment_time, target_segment_duration, extra_duration_needed);
            fprintf(stdout, "%s File %s contains %d PES packet, of which %d are audio packet, %d are video packet within %d key frame.\n", getSystemTime( timeChar), output_filename, tscnt, audiopktcnt, videopktcnt, kfcnt);
            fflush(stdout);

            tscnt = audiopktcnt = videopktcnt = kfcnt = 0;
            avio_flush(oc->pb);
            avio_close(oc->pb);

            // Keep track of accumulated rounding error to account for it in later chunks.
            double seg_dur = segment_time - prev_segment_time;
            int rounded_segment_duration = (int) (seg_dur + 0.5);
            extra_duration_needed += (target_segment_duration - seg_dur - segment_duration_error_tolerance);

            updatePlaylist(playlist, playlist_filename, output_filename, output_index, rounded_segment_duration);
            snprintf(output_filename, strlen(output_prefix) + 15, "%s-%u.ts", output_prefix, ++output_index);

            //add by wanggm
            //Save the all the keyframe information into json file
            if( 1 == if_save_keyframe && NULL != obj) {
                save_json_to_file(keyframeinfo_filename, obj);
                obj = info_arr_obj = NULL;
                snprintf(keyframeinfo_filename, strlen(output_prefix) + 15, "%s-%u.idx", output_prefix, output_index);
            }

            if (avio_open(&oc->pb, output_filename, AVIO_FLAG_WRITE) < 0) {
                fprintf(stderr, "%s Could not open '%s'\n", getSystemTime( timeChar), output_filename);
                break;
            }

            // close when we find the 'kill' file
            if (kill_file) {
                FILE* fp = fopen("kill", "rb");
                if (fp) {
                    fprintf(stderr, "%s user abort: found kill file\n", getSystemTime( timeChar));
                    fclose(fp);
                    remove("kill");
                    decode_done = 1;
                    removeAllPackets(streamLace);
                }
            }
            prev_segment_time = segment_time;
        }

        //add by wanggm.
        ++tscnt;
        if( video_index == packet.stream_index) {
            ++videopktcnt;
            /* NOTE(review): this tests packet.flags == 1 rather than
             * (packet.flags & AV_PKT_FLAG_KEY); it misses keyframes that
             * also carry other flag bits — confirm intent. */
            if(1 == packet.flags) {
                ++kfcnt;
                if( 1 == if_save_keyframe) {
                    //If it is key frame, it's information should be saved.
                    snprintf(keyframeinfo_filename, strlen(output_prefix) + 15, "%s-%u.idx", output_prefix, output_index);
                    if (NULL == obj && NULL == info_arr_obj) {
                        obj = json_object_new_object();
                        info_arr_obj = create_json_header(obj);
                    }
                    avio_flush(oc->pb); //flush the previous data into ts file.
                    int64_t offset = avio_tell(oc->pb); //Get the offset of this key frame in the file.
                    save_keyframe_info(info_arr_obj, offset, packet.pts);
                }
            }
        }else if( audio_index == packet.stream_index) {
            ++audiopktcnt;
        }

        ret = av_interleaved_write_frame(oc, &packet);
        if (ret < 0) {
            fprintf(stderr, "%s Warning: Could not write frame of stream\n", getSystemTime( timeChar));
            // add by houmr: tolerate up to 10 consecutive write failures
            continue_error_cnt++;
            if (continue_error_cnt > 10) {
                av_free_packet(&packet);
                break;
            }
        } else if (ret > 0) {
            fprintf(stderr, "%s End of stream requested\n", getSystemTime( timeChar));
            av_free_packet(&packet);
            break;
        } else {
            // add by houmr error
            continue_error_cnt = 0;
        }
        av_free_packet(&packet);
    } while (!decode_done || countPackets(streamLace) > 0);

    /* ---- finalize: trailer, codec, streams, playlist ---- */
    av_write_trailer(oc);

    if (video_index >= 0) {
        avcodec_close(video_st->codec);
    }

    for (i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    avio_close(oc->pb);
    av_free(oc);

    updatePlaylist(playlist, playlist_filename, output_filename, output_index, segment_duration);
    closePlaylist(playlist);
    releasePlaylist(&playlist);

    //add by wanggm
    if( 1 == if_save_keyframe && obj != NULL) {
        save_json_to_file(keyframeinfo_filename, obj);
    }

    if (pid_filename) {
        remove(pid_filename);
    }

    fflush(stdout);
    fflush(stderr);

    return 0;

error:
    if (pid_filename) {
        remove(pid_filename);
    }
    return 1;
}