/* Append one rendered frame to the output; when auto-split is enabled, close
 * and reopen the file once it grows past FFMPEG_AUTOSPLIT_SIZE. */
int BKE_ffmpeg_append(void *context_v, RenderData *rd, int start_frame, int frame,
                      int *pixels, int rectx, int recty, const char *suffix,
                      ReportList *reports)
{
  FFMpegContext *context = context_v;
  AVFrame *avframe;
  int success = 1;

  PRINT("Writing frame %i, render width=%d, render height=%d\n", frame, rectx, recty);

  /* why is this done before writing the video frame and again at end_ffmpeg? */
  // write_audio_frames(frame / (((double)rd->frs_sec) / rd->frs_sec_base));

  if (context->video_stream) {
    avframe = generate_video_frame(context, (unsigned char *)pixels, reports);
    success = (avframe && write_video_frame(context, rd, frame - start_frame, avframe, reports));

    if (context->ffmpeg_autosplit) {
      if (avio_tell(context->outfile->pb) > FFMPEG_AUTOSPLIT_SIZE) {
        end_ffmpeg_impl(context, true);
        context->ffmpeg_autosplit_count++;
        success &= start_ffmpeg_impl(context, rd, rectx, recty, suffix, reports);
      }
    }
  }

#ifdef WITH_AUDASPACE
  write_audio_frames(context, (frame - start_frame) / (((double)rd->frs_sec) / (double)rd->frs_sec_base));
#endif

  return success;
}
/* Older, global-state variant of the append entry point (pre-FFMpegContext). */
int append_ffmpeg(RenderData *rd, int frame, int *pixels, int rectx, int recty,
                  ReportList *reports)
{
  AVFrame *avframe;
  int success = 1;

  fprintf(stderr, "Writing frame %i, render width=%d, render height=%d\n",
          frame, rectx, recty);

  /* why is this done before writing the video frame and again at end_ffmpeg? */
  // write_audio_frames(frame / (((double)rd->frs_sec) / rd->frs_sec_base));

  if (video_stream) {
    avframe = generate_video_frame((unsigned char *)pixels, reports);
    success = (avframe && write_video_frame(rd, avframe, reports));

    if (ffmpeg_autosplit) {
      if (avio_tell(outfile->pb) > FFMPEG_AUTOSPLIT_SIZE) {
        end_ffmpeg();
        ffmpeg_autosplit_count++;
        success &= start_ffmpeg_impl(rd, rectx, recty, reports);
      }
    }
  }

#ifdef WITH_AUDASPACE
  write_audio_frames((frame - rd->sfra) / (((double)rd->frs_sec) / rd->frs_sec_base));
#endif

  return success;
}
/* Older, global-state variant of the start entry point: chooses between S16
 * and float audio buffers instead of switching on the encoder's sample
 * format. */
int BKE_ffmpeg_start(struct Scene *scene, RenderData *rd, int rectx, int recty,
                     ReportList *reports)
{
  int success;

  ffmpeg_autosplit_count = 0;

  success = start_ffmpeg_impl(rd, rectx, recty, reports);
#ifdef WITH_AUDASPACE
  if (audio_stream) {
    AVCodecContext *c = audio_stream->codec;
    AUD_DeviceSpecs specs;
    specs.channels = c->channels;

    if (use_float_audio_buffer(c->codec_id)) {
      specs.format = AUD_FORMAT_FLOAT32;
    }
    else {
      specs.format = AUD_FORMAT_S16;
    }

    specs.rate = rd->ffcodecdata.audio_mixrate;
    audio_mixdown_device = sound_mixdown(scene, specs, rd->sfra,
                                         rd->ffcodecdata.audio_volume);
#ifdef FFMPEG_CODEC_TIME_BASE
    c->time_base.den = specs.rate;
    c->time_base.num = 1;
#endif
  }
#endif
  return success;
}
/* Open the output file and streams; when audio is enabled, start a mixdown
 * device whose sample format matches the encoder's. */
int BKE_ffmpeg_start(void *context_v, struct Scene *scene, RenderData *rd,
                     int rectx, int recty, ReportList *reports, bool preview,
                     const char *suffix)
{
  int success;
  FFMpegContext *context = context_v;

  context->ffmpeg_autosplit_count = 0;
  context->ffmpeg_preview = preview;

  success = start_ffmpeg_impl(context, rd, rectx, recty, suffix, reports);
#ifdef WITH_AUDASPACE
  if (context->audio_stream) {
    AVCodecContext *c = context->audio_stream->codec;
    AUD_DeviceSpecs specs;
    specs.channels = c->channels;

    switch (av_get_packed_sample_fmt(c->sample_fmt)) {
      case AV_SAMPLE_FMT_U8:
        specs.format = AUD_FORMAT_U8;
        break;
      case AV_SAMPLE_FMT_S16:
        specs.format = AUD_FORMAT_S16;
        break;
      case AV_SAMPLE_FMT_S32:
        specs.format = AUD_FORMAT_S32;
        break;
      case AV_SAMPLE_FMT_FLT:
        specs.format = AUD_FORMAT_FLOAT32;
        break;
      case AV_SAMPLE_FMT_DBL:
        specs.format = AUD_FORMAT_FLOAT64;
        break;
      default:
        return -31415;
    }

    specs.rate = rd->ffcodecdata.audio_mixrate;
    context->audio_mixdown_device = BKE_sound_mixdown(
        scene, specs, preview ? rd->psfra : rd->sfra, rd->ffcodecdata.audio_volume);
#ifdef FFMPEG_CODEC_TIME_BASE
    c->time_base.den = specs.rate;
    c->time_base.num = 1;
#endif
  }
#endif
  return success;
}
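/*
 * A minimal caller-side sketch of the context-based API above: start the
 * muxer once, append each rendered frame, then finish. The context
 * create/end/free entry points (BKE_ffmpeg_context_create, BKE_ffmpeg_end,
 * BKE_ffmpeg_context_free) are assumed to come from the same API and are not
 * shown in this file; render_frame_pixels() is a hypothetical helper standing
 * in for the renderer, and error handling is abbreviated.
 */
static void render_to_ffmpeg_sketch(struct Scene *scene, RenderData *rd,
                                    int rectx, int recty, ReportList *reports)
{
  void *context = BKE_ffmpeg_context_create();  /* assumed companion call */

  /* Open the output file and, with WITH_AUDASPACE, the audio mixdown device. */
  if (BKE_ffmpeg_start(context, scene, rd, rectx, recty, reports, false, "") > 0) {
    int frame;
    for (frame = rd->sfra; frame <= rd->efra; frame++) {
      /* Hypothetical: fetch the packed RGBA pixels for this frame. */
      int *pixels = render_frame_pixels(frame);

      /* Encode one video frame (and pending audio); BKE_ffmpeg_append
       * auto-splits the output when it exceeds FFMPEG_AUTOSPLIT_SIZE. */
      if (!BKE_ffmpeg_append(context, rd, rd->sfra, frame, pixels,
                             rectx, recty, "", reports)) {
        break;
      }
    }
  }

  /* Flush delayed frames and close the streams and output file. */
  BKE_ffmpeg_end(context);           /* assumed companion call */
  BKE_ffmpeg_context_free(context);  /* assumed companion call */
}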