// Detect type of input file and open it if recognized. Routine // based on the av_open_input_file() libavformat function. int ufile_fopen_input(AVFormatContext **ic_ptr, const wxString &name) { wxFileName f(name); AVIOContext *pb = NULL; int err; // Open the file to prepare for probing if ((err = ufile_fopen(&pb, name, AVIO_FLAG_READ)) < 0) { goto fail; } *ic_ptr = avformat_alloc_context(); (*ic_ptr)->pb = pb; // And finally, attempt to associate an input stream with the file err = avformat_open_input(ic_ptr, OSINPUT(name), NULL, NULL); if (err) { goto fail; } return 0; fail: if (pb) { ufile_close(pb); } *ic_ptr = NULL; return err; }
// JS binding for closing a file writer. Expects a single string argument
// holding the writer's descriptor; anything else is silently ignored.
// Always returns JS null to the caller.
JSValueRef function_file_writer_close(JSContextRef ctx, JSObjectRef function, JSObjectRef thisObject, size_t argc, const JSValueRef args[], JSValueRef *exception) {
    bool has_descriptor_arg = (argc == 1) && (JSValueGetType(ctx, args[0]) == kJSTypeString);
    if (has_descriptor_arg) {
        char *desc_str = value_to_c_string(ctx, args[0]);
        int fd = descriptor_str_to_int(desc_str);
        free(desc_str);
        ufile_close(fd);
    }
    return JSValueMakeNull(ctx);
}
// Tear down the probing context. NOTE(review): the order here is
// deliberate — close the input first, then release the custom AVIO
// context — so do not reorder these statements.
FFmpegContext::~FFmpegContext()
{
   // Only call libav* entry points while the FFmpeg libraries are still
   // loaded; after unload, the function pointers are invalid.
   if (FFmpegLibsInst()->ValidLibsLoaded())
   {
      if (ic_ptr)
         avformat_close_input(&ic_ptr);
      // Restore FFmpeg's stock log handler (a custom one was presumably
      // installed elsewhere in this file — TODO confirm).
      av_log_set_callback(av_log_default_callback);
   }

   if (pb)
   {
      // ufile_close() appears to close the underlying file only, not the
      // AVIOContext itself — hence the manual frees below. TODO confirm.
      ufile_close(pb);
      if (FFmpegLibsInst()->ValidLibsLoaded())
      {
         // Free the probe buffer before the context that points at it.
         // If the libs are no longer loaded, pb and its buffer are
         // intentionally leaked — av_free() cannot be called safely.
         av_free(pb->buffer);
         av_free(pb);
      }
   }
}
bool ExportFFmpeg::Finalize() { int i, nEncodedBytes; // Flush the audio FIFO and encoder. for (;;) { AVPacket pkt; int nFifoBytes = av_fifo_size(mEncAudioFifo); // any bytes left in audio FIFO? av_init_packet(&pkt); nEncodedBytes = 0; int nAudioFrameSizeOut = default_frame_size * mEncAudioCodecCtx->channels * sizeof(int16_t); if (nAudioFrameSizeOut > mEncAudioFifoOutBufSiz || nFifoBytes > mEncAudioFifoOutBufSiz) { wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Too much remaining data.")), _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION); return false; } // Flush the audio FIFO first if necessary. It won't contain a _full_ audio frame because // if it did we'd have pulled it from the FIFO during the last encodeAudioFrame() call - // the encoder must support short/incomplete frames for this to work. if (nFifoBytes > 0) { // Fill audio buffer with zeroes. If codec tries to read the whole buffer, // it will just read silence. If not - who cares? memset(mEncAudioFifoOutBuf,0,mEncAudioFifoOutBufSiz); const AVCodec *codec = mEncAudioCodecCtx->codec; // We have an incomplete buffer of samples left. Is it OK to encode it? // If codec supports CODEC_CAP_SMALL_LAST_FRAME, we can feed it with smaller frame // Or if codec is FLAC, feed it anyway (it doesn't have CODEC_CAP_SMALL_LAST_FRAME, but it works) // Or if frame_size is 1, then it's some kind of PCM codec, they don't have frames and will be fine with the samples // Or if user configured the exporter to pad with silence, then we'll send audio + silence as a frame. if ((codec->capabilities & (CODEC_CAP_SMALL_LAST_FRAME|CODEC_CAP_VARIABLE_FRAME_SIZE)) || mEncAudioCodecCtx->frame_size <= 1 || gPrefs->Read(wxT("/FileFormats/OverrideSmallLastFrame"), true) ) { int frame_size = default_frame_size; // The last frame is going to contain a smaller than usual number of samples. 
// For codecs without CODEC_CAP_SMALL_LAST_FRAME use normal frame size if (codec->capabilities & (CODEC_CAP_SMALL_LAST_FRAME|CODEC_CAP_VARIABLE_FRAME_SIZE)) frame_size = nFifoBytes / (mEncAudioCodecCtx->channels * sizeof(int16_t)); wxLogDebug(wxT("FFmpeg : Audio FIFO still contains %d bytes, writing %d sample frame ..."), nFifoBytes, frame_size); // Pull the bytes out from the FIFO and feed them to the encoder. if (av_fifo_generic_read(mEncAudioFifo, mEncAudioFifoOutBuf, nFifoBytes, NULL) == 0) { nEncodedBytes = encode_audio(mEncAudioCodecCtx, &pkt, (int16_t*)mEncAudioFifoOutBuf, frame_size); } } } // Now flush the encoder. if (nEncodedBytes <= 0) nEncodedBytes = encode_audio(mEncAudioCodecCtx, &pkt, NULL, 0); if (nEncodedBytes <= 0) break; pkt.stream_index = mEncAudioStream->index; // Set presentation time of frame (currently in the codec's timebase) in the stream timebase. if(pkt.pts != int64_t(AV_NOPTS_VALUE)) pkt.pts = av_rescale_q(pkt.pts, mEncAudioCodecCtx->time_base, mEncAudioStream->time_base); if(pkt.dts != int64_t(AV_NOPTS_VALUE)) pkt.dts = av_rescale_q(pkt.dts, mEncAudioCodecCtx->time_base, mEncAudioStream->time_base); if (av_interleaved_write_frame(mEncFormatCtx, &pkt) != 0) { wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Couldn't write last audio frame to output file.")), _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION); break; } av_free_packet(&pkt); } // Write any file trailers. av_write_trailer(mEncFormatCtx); // Close the codecs. if (mEncAudioStream != NULL) avcodec_close(mEncAudioStream->codec); for (i = 0; i < (int)mEncFormatCtx->nb_streams; i++) { av_freep(&mEncFormatCtx->streams[i]->codec); av_freep(&mEncFormatCtx->streams[i]); } // Close the output file if we created it. if (!(mEncFormatDesc->flags & AVFMT_NOFILE)) ufile_close(mEncFormatCtx->pb); // Free any buffers or structures we allocated. 
av_free(mEncFormatCtx); av_freep(&mEncAudioFifoOutBuf); mEncAudioFifoOutBufSiz = 0; av_fifo_free(mEncAudioFifo); mEncAudioFifo = NULL; return true; }