static void set_luma_transfer( struct SwsContext *context, int colorspace, int use_full_range ) { int *coefficients; const int *new_coefficients; int full_range; int brightness, contrast, saturation; if ( sws_getColorspaceDetails( context, &coefficients, &full_range, &coefficients, &full_range, &brightness, &contrast, &saturation ) != -1 ) { // Don't change these from defaults unless explicitly told to. if ( use_full_range >= 0 ) full_range = use_full_range; switch ( colorspace ) { case 170: case 470: case 601: case 624: new_coefficients = sws_getCoefficients( SWS_CS_ITU601 ); break; case 240: new_coefficients = sws_getCoefficients( SWS_CS_SMPTE240M ); break; case 709: new_coefficients = sws_getCoefficients( SWS_CS_ITU709 ); break; default: new_coefficients = coefficients; break; } sws_setColorspaceDetails( context, new_coefficients, full_range, new_coefficients, full_range, brightness, contrast, saturation ); } }
bool ImageConverterFFPrivate::setupColorspaceDetails(bool force) { if (!sws_ctx) { update_eq = true; return false; } if (force) update_eq = true; if (!update_eq) { return true; } const int srcRange = range_in == ColorRange_Limited ? 0 : 1; int dstRange = range_out == ColorRange_Limited ? 0 : 1; // TODO: color space bool supported = sws_setColorspaceDetails(sws_ctx, sws_getCoefficients(SWS_CS_DEFAULT) , srcRange, sws_getCoefficients(SWS_CS_DEFAULT) , dstRange , ((brightness << 16) + 50)/100 , (((contrast + 100) << 16) + 50)/100 , (((saturation + 100) << 16) + 50)/100 ) >= 0; //sws_init_context(d.sws_ctx, NULL, NULL); update_eq = false; return supported; }
bool ImageConverterFF::setupColorspaceDetails() { DPTR_D(ImageConverterFF); if (!d.sws_ctx) { d.update_eq = true; return false; } //if (!d.update_eq) // return true; // FIXME: how to fill the ranges? const int srcRange = 1; const int dstRange = 0; // TODO: SWS_CS_DEFAULT? sws_setColorspaceDetails(d.sws_ctx, sws_getCoefficients(SWS_CS_DEFAULT) , srcRange, sws_getCoefficients(SWS_CS_DEFAULT) , dstRange , ((d.brightness << 16) + 50)/100 , (((d.contrast + 100) << 16) + 50)/100 , (((d.saturation + 100) << 16) + 50)/100 ); // TODO: b, c, s map function? //sws_init_context(d.sws_ctx, NULL, NULL); d.update_eq = false; return true; }
bool ImageConverterFFPrivate::setupColorspaceDetails(bool force) { if (!sws_ctx) { update_eq = true; return false; } if (force) update_eq = true; if (!update_eq) { return true; } // FIXME: how to fill the ranges? const int srcRange = 1; const int dstRange = 0; // TODO: SWS_CS_DEFAULT? bool supported = sws_setColorspaceDetails(sws_ctx, sws_getCoefficients(SWS_CS_DEFAULT) , srcRange, sws_getCoefficients(SWS_CS_DEFAULT) , dstRange , ((brightness << 16) + 50)/100 , (((contrast + 100) << 16) + 50)/100 , (((saturation + 100) << 16) + 50)/100 ) >= 0; //sws_init_context(d.sws_ctx, NULL, NULL); update_eq = false; return supported; }
/* Allocate and initialize a fully configured swscale context with
 * explicit colorspace coefficients and sample ranges for both ends.
 * Returns nullptr on allocation or initialization failure. */
SwsContext *GetSwsContext(int SrcW, int SrcH, AVPixelFormat SrcFormat, int SrcColorSpace, int SrcColorRange, int DstW, int DstH, AVPixelFormat DstFormat, int DstColorSpace, int DstColorRange, int64_t Flags)
{
    // Always request full-chroma interpolation/input and accurate rounding.
    Flags |= SWS_FULL_CHR_H_INT | SWS_FULL_CHR_H_INP | SWS_ACCURATE_RND;

    SwsContext *ctx = sws_alloc_context();
    if (!ctx)
        return nullptr;

    // libswscale ranges: 0 = limited (MPEG), 1 = full (JPEG).
    const int srcFull = (SrcColorRange == AVCOL_RANGE_JPEG) ? 1 : 0;
    const int dstFull = (DstColorRange == AVCOL_RANGE_JPEG) ? 1 : 0;

    av_opt_set_int(ctx, "sws_flags", Flags, 0);
    av_opt_set_int(ctx, "srcw", SrcW, 0);
    av_opt_set_int(ctx, "srch", SrcH, 0);
    av_opt_set_int(ctx, "src_format", SrcFormat, 0);
    av_opt_set_int(ctx, "src_range", srcFull, 0);
    av_opt_set_int(ctx, "dstw", DstW, 0);
    av_opt_set_int(ctx, "dsth", DstH, 0);
    av_opt_set_int(ctx, "dst_format", DstFormat, 0);
    av_opt_set_int(ctx, "dst_range", dstFull, 0);

    // Neutral brightness (0) and unity contrast/saturation (1.0 in 16.16).
    sws_setColorspaceDetails(ctx,
                             sws_getCoefficients(SrcColorSpace), srcFull,
                             sws_getCoefficients(DstColorSpace), dstFull,
                             0, 1 << 16, 1 << 16);

    if (sws_init_context(ctx, nullptr, nullptr) < 0) {
        sws_freeContext(ctx);
        return nullptr;
    }
    return ctx;
}
/* Map an OBS colorspace enum to the matching libswscale coefficient
 * table. Everything except explicit Rec.709 (including the default and
 * any unrecognized value) falls back to Rec.601. */
static inline const int *get_ffmpeg_coeffs(enum video_colorspace cs)
{
	const int table = (cs == VIDEO_CS_709) ? SWS_CS_ITU709 : SWS_CS_ITU601;
	return sws_getCoefficients(table);
}
/* Create the cached swscale context and destination picture used to
 * convert decoded frames to the media object's scale format. The
 * conversion keeps the decoder's resolution; only the pixel format
 * changes. Returns false (after logging) on any failure. */
static bool mp_media_init_scaling(mp_media_t *m)
{
	int cs = get_sws_colorspace(m->v.decoder->colorspace);
	int rng = get_sws_range(m->v.decoder->color_range);
	const int *table = sws_getCoefficients(cs);

	m->swscale = sws_getCachedContext(NULL,
			m->v.decoder->width, m->v.decoder->height,
			m->v.decoder->pix_fmt,
			m->v.decoder->width, m->v.decoder->height,
			m->scale_format,
			SWS_FAST_BILINEAR, NULL, NULL, NULL);
	if (!m->swscale) {
		blog(LOG_WARNING, "MP: Failed to initialize scaler");
		return false;
	}

	/* Same coefficients/range on both sides; neutral eq settings. */
	sws_setColorspaceDetails(m->swscale, table, rng, table, rng,
			0, FIXED_1_0, FIXED_1_0);

	int err = av_image_alloc(m->scale_pic, m->scale_linesizes,
			m->v.decoder->width, m->v.decoder->height,
			m->scale_format, 1);
	if (err < 0) {
		blog(LOG_WARNING, "MP: Failed to create scale pic data");
		return false;
	}

	return true;
}
/* DLL entry point: initializes the module-global decoder state on
 * process attach and releases it on detach. All state touched here
 * (g_cs, g_cs_table, g_packet, g_swsctx, g_context, g_frame) is
 * file-global and shared by the exported decoding functions. */
BOOL __stdcall DllMain(HMODULE hm, DWORD reason, LPVOID dummy)
{
	dummy; /* reference the unused parameter to silence warnings */
	if(reason == DLL_PROCESS_ATTACH)
	{
		/* Only the H.264 decoder is linked in; registered below. */
		extern AVCodec ff_h264_decoder;
		/* No per-thread attach/detach notifications needed. */
		DisableThreadLibraryCalls(hm);
		InitializeCriticalSection(&g_cs);
		/* Keep FFmpeg quiet except for fatal problems. */
		av_log_set_level(AV_LOG_PANIC);
		g_cs_table = sws_getCoefficients(SWS_CS_ITU601);
		/* Blank packet with "unknown" timestamps/position. */
		memset(&g_packet, 0, sizeof(g_packet));
		g_packet.pts = AV_NOPTS_VALUE;
		g_packet.dts = AV_NOPTS_VALUE;
		g_packet.pos = -1;
		avcodec_register(&ff_h264_decoder);
	}
	else if(reason == DLL_PROCESS_DETACH)
	{
		/* Free whichever globals were actually created; NULL them so a
		 * stray late call cannot double-free. */
		if(g_swsctx) { sws_freeContext(g_swsctx); g_swsctx = 0; }
		if(g_context) { av_free(g_context); g_context = 0; }
		if(g_frame) { av_free(g_frame); g_frame = 0; }
		DeleteCriticalSection(&g_cs);
	}
	return TRUE;
}
void MovieDecoder::convertAndScaleFrame(PixelFormat format, int scaledSize, bool maintainAspectRatio, int& scaledWidth, int& scaledHeight) { calculateDimensions(scaledSize, maintainAspectRatio, scaledWidth, scaledHeight); #ifdef LATEST_GREATEST_FFMPEG // Enable this when it hits the released ffmpeg version SwsContext* scaleContext = sws_alloc_context(); if (scaleContext == nullptr) { throw std::logic_error("Failed to allocate scale context"); } av_set_int(scaleContext, "srcw", m_pVideoCodecContext->width); av_set_int(scaleContext, "srch", m_pVideoCodecContext->height); av_set_int(scaleContext, "src_format", m_pVideoCodecContext->pix_fmt); av_set_int(scaleContext, "dstw", scaledWidth); av_set_int(scaleContext, "dsth", scaledHeight); av_set_int(scaleContext, "dst_format", format); av_set_int(scaleContext, "sws_flags", SWS_BICUBIC); const int* coeff = sws_getCoefficients(SWS_CS_DEFAULT); if (sws_setColorspaceDetails(scaleContext, coeff, m_pVideoCodecContext->pix_fmt, coeff, format, 0, 1<<16, 1<<16) < 0) { sws_freeContext(scaleContext); throw std::logic_error("Failed to set colorspace details"); } if (sws_init_context(scaleContext, nullptr, nullptr) < 0) { sws_freeContext(scaleContext); throw std::logic_error("Failed to initialise scale context"); } #endif SwsContext* scaleContext = sws_getContext(m_pVideoCodecContext->width, m_pVideoCodecContext->height, m_pVideoCodecContext->pix_fmt, scaledWidth, scaledHeight, format, SWS_BICUBIC, nullptr, nullptr, nullptr); if (nullptr == scaleContext) { throw logic_error("Failed to create resize context"); } AVFrame* convertedFrame = nullptr; uint8_t* convertedFrameBuffer = nullptr; createAVFrame(&convertedFrame, &convertedFrameBuffer, scaledWidth, scaledHeight, format); sws_scale(scaleContext, m_pFrame->data, m_pFrame->linesize, 0, m_pVideoCodecContext->height, convertedFrame->data, convertedFrame->linesize); sws_freeContext(scaleContext); av_free(m_pFrame); av_free(m_pFrameBuffer); m_pFrame = convertedFrame; m_pFrameBuffer = 
convertedFrameBuffer; }
int mlt_set_luma_transfer( struct SwsContext *context, int src_colorspace, int dst_colorspace, int src_full_range, int dst_full_range ) { const int *src_coefficients = sws_getCoefficients( SWS_CS_DEFAULT ); const int *dst_coefficients = sws_getCoefficients( SWS_CS_DEFAULT ); int brightness = 0; int contrast = 1 << 16; int saturation = 1 << 16; int src_range = src_full_range ? 1 : 0; int dst_range = dst_full_range ? 1 : 0; switch ( src_colorspace ) { case 170: case 470: case 601: case 624: src_coefficients = sws_getCoefficients( SWS_CS_ITU601 ); break; case 240: src_coefficients = sws_getCoefficients( SWS_CS_SMPTE240M ); break; case 709: src_coefficients = sws_getCoefficients( SWS_CS_ITU709 ); break; default: break; } switch ( dst_colorspace ) { case 170: case 470: case 601: case 624: dst_coefficients = sws_getCoefficients( SWS_CS_ITU601 ); break; case 240: dst_coefficients = sws_getCoefficients( SWS_CS_SMPTE240M ); break; case 709: dst_coefficients = sws_getCoefficients( SWS_CS_ITU709 ); break; default: break; } return sws_setColorspaceDetails( context, src_coefficients, src_range, dst_coefficients, dst_range, brightness, contrast, saturation ); }
SwsContext *FFGetSwsContext(int SrcW, int SrcH, PixelFormat SrcFormat, int DstW, int DstH, PixelFormat DstFormat, int64_t Flags, int ColorSpace, int ColorRange) { Flags |= SWS_FULL_CHR_H_INT | SWS_FULL_CHR_H_INP; #if LIBSWSCALE_VERSION_INT < AV_VERSION_INT(0, 12, 0) return sws_getContext(SrcW, SrcH, SrcFormat, DstW, DstH, DstFormat, Flags, 0, 0, 0); #else SwsContext *Context = sws_alloc_context(); if (!Context) return 0; // The intention here is to never change the color range. int Range; // 0 = limited range, 1 = full range if (ColorRange == AVCOL_RANGE_JPEG) Range = 1; else // explicit limited range, or unspecified Range = 0; av_opt_set_int(Context, "sws_flags", Flags, 0); av_opt_set_int(Context, "srcw", SrcW, 0); av_opt_set_int(Context, "srch", SrcH, 0); av_opt_set_int(Context, "dstw", DstW, 0); av_opt_set_int(Context, "dsth", DstH, 0); av_opt_set_int(Context, "src_range", Range, 0); av_opt_set_int(Context, "dst_range", Range, 0); av_opt_set_int(Context, "src_format", SrcFormat, 0); av_opt_set_int(Context, "dst_format", DstFormat, 0); sws_setColorspaceDetails(Context, sws_getCoefficients(ColorSpace), Range, sws_getCoefficients(ColorSpace), Range, 0, 1<<16, 1<<16); if(sws_init_context(Context, 0, 0) < 0){ sws_freeContext(Context); return 0; } return Context; #endif }
SwsContext *GetSwsContext(int SrcW, int SrcH, PixelFormat SrcFormat, int SrcColorSpace, int SrcColorRange, int DstW, int DstH, PixelFormat DstFormat, int DstColorSpace, int DstColorRange, int64_t Flags) { Flags |= SWS_FULL_CHR_H_INT | SWS_FULL_CHR_H_INP; #if LIBSWSCALE_VERSION_INT < AV_VERSION_INT(0, 12, 0) return sws_getContext(SrcW, SrcH, SrcFormat, DstW, DstH, DstFormat, Flags, 0, 0, 0); #else SwsContext *Context = sws_alloc_context(); if (!Context) return 0; // 0 = limited range, 1 = full range int SrcRange = SrcColorRange == AVCOL_RANGE_JPEG; int DstRange = DstColorRange == AVCOL_RANGE_JPEG; av_opt_set_int(Context, "sws_flags", Flags, 0); av_opt_set_int(Context, "srcw", SrcW, 0); av_opt_set_int(Context, "srch", SrcH, 0); av_opt_set_int(Context, "dstw", DstW, 0); av_opt_set_int(Context, "dsth", DstH, 0); av_opt_set_int(Context, "src_range", SrcRange, 0); av_opt_set_int(Context, "dst_range", DstRange, 0); av_opt_set_int(Context, "src_format", SrcFormat, 0); av_opt_set_int(Context, "dst_format", DstFormat, 0); sws_setColorspaceDetails(Context, sws_getCoefficients(SrcColorSpace), SrcRange, sws_getCoefficients(DstColorSpace), DstRange, 0, 1<<16, 1<<16); if(sws_init_context(Context, 0, 0) < 0) { sws_freeContext(Context); return 0; } return Context; #endif }
struct SwsContext *update_scaler_configuration ( struct SwsContext *sws_ctx, int flags, int width, int height, enum AVPixelFormat input_pixel_format, enum AVPixelFormat output_pixel_format, enum AVColorSpace colorspace, int yuv_range ) { if( sws_ctx ) sws_freeContext( sws_ctx ); sws_ctx = sws_alloc_context(); if( !sws_ctx ) return NULL; av_opt_set_int( sws_ctx, "sws_flags", flags, 0 ); av_opt_set_int( sws_ctx, "srcw", width, 0 ); av_opt_set_int( sws_ctx, "srch", height, 0 ); av_opt_set_int( sws_ctx, "dstw", width, 0 ); av_opt_set_int( sws_ctx, "dsth", height, 0 ); av_opt_set_int( sws_ctx, "src_format", input_pixel_format, 0 ); av_opt_set_int( sws_ctx, "dst_format", output_pixel_format, 0 ); const int *yuv2rgb_coeffs = sws_getCoefficients( colorspace ); sws_setColorspaceDetails( sws_ctx, yuv2rgb_coeffs, yuv_range, yuv2rgb_coeffs, yuv_range, 0, 1 << 16, 1 << 16 ); if( sws_init_context( sws_ctx, NULL, NULL ) < 0 ) { sws_freeContext( sws_ctx ); return NULL; } return sws_ctx; }
/* Benchmark BGRA scaling: libswscale vs. the project's fallback scaler
 * vs. the SSSE3 scaler (when the CPU supports it), logging per-frame
 * times in microseconds and relative percentages. */
void BenchmarkScale(unsigned int in_w, unsigned int in_h, unsigned int out_w, unsigned int out_h) {
	std::mt19937 rng(12345); // fixed seed for reproducible image contents
#if SSR_USE_X86_ASM
	bool use_ssse3 = (CPUFeatures::HasMMX() && CPUFeatures::HasSSE() && CPUFeatures::HasSSE2() && CPUFeatures::HasSSE3() && CPUFeatures::HasSSSE3());
#endif

	// the queue needs to use enough memory to make sure that the CPU cache is flushed
	unsigned int pixels = std::max(in_w * in_h, out_w * out_h);
	unsigned int queue_size = 1 + 20000000 / pixels;
	unsigned int run_size = queue_size * 20;

	// create queue
	std::vector<std::unique_ptr<ImageGeneric> > queue_in(queue_size);
	std::vector<std::unique_ptr<ImageGeneric> > queue_out(queue_size);
	for(unsigned int i = 0; i < queue_size; ++i) {
		queue_in[i] = NewImageBGRA(in_w, in_h, rng);
		queue_out[i] = NewImageBGRA(out_w, out_h, rng);
	}

	// run test
	unsigned int time_swscale = 0, time_fallback = 0, time_ssse3 = 0;
	{
		// swscale pass (runs half as many iterations as the others)
		SwsContext *sws = sws_getCachedContext(NULL,
											   in_w, in_h, AV_PIX_FMT_BGRA,
											   out_w, out_h, AV_PIX_FMT_BGRA,
											   SWS_BILINEAR, NULL, NULL, NULL);
		if(sws == NULL) {
			Logger::LogError("[BenchmarkScale] " + Logger::tr("Error: Can't get swscale context!", "Don't translate 'swscale'"));
			throw LibavException();
		}
		sws_setColorspaceDetails(sws, sws_getCoefficients(SWS_CS_ITU709), 0, sws_getCoefficients(SWS_CS_DEFAULT), 0, 0, 1 << 16, 1 << 16);
		int64_t t1 = hrt_time_micro();
		for(unsigned int i = 0; i < run_size / 2; ++i) {
			unsigned int ii = i % queue_size; // cycle through queue to defeat the cache
			sws_scale(sws, queue_in[ii]->m_data.data(), queue_in[ii]->m_stride.data(), 0, in_h, queue_out[ii]->m_data.data(), queue_out[ii]->m_stride.data());
		}
		int64_t t2 = hrt_time_micro();
		time_swscale = (t2 - t1) / (run_size / 2);
	}
	{
		// fallback (plain C++) pass
		int64_t t1 = hrt_time_micro();
		for(unsigned int i = 0; i < run_size; ++i) {
			unsigned int ii = i % queue_size;
			Scale_BGRA_Fallback(in_w, in_h, queue_in[ii]->m_data[0], queue_in[ii]->m_stride[0], out_w, out_h, queue_out[ii]->m_data[0], queue_out[ii]->m_stride[0]);
		}
		int64_t t2 = hrt_time_micro();
		time_fallback = (t2 - t1) / run_size;
	}
#if SSR_USE_X86_ASM
	if(use_ssse3) {
		// SSSE3 pass (only when the CPU supports it)
		int64_t t1 = hrt_time_micro();
		for(unsigned int i = 0; i < run_size; ++i) {
			unsigned int ii = i % queue_size;
			Scale_BGRA_SSSE3(in_w, in_h, queue_in[ii]->m_data[0], queue_in[ii]->m_stride[0], out_w, out_h, queue_out[ii]->m_data[0], queue_out[ii]->m_stride[0]);
		}
		int64_t t2 = hrt_time_micro();
		time_ssse3 = (t2 - t1) / run_size;
	}
#endif

	// print result
	QString in_size = QString("%1x%2").arg(in_w).arg(in_h);
	QString out_size = QString("%1x%2").arg(out_w).arg(out_h);
	Logger::LogInfo("[BenchmarkScale] " + Logger::tr("BGRA %1 to BGRA %2 | SWScale %3 us | Fallback %4 us (%5%) | SSSE3 %6 us (%7%)")
					.arg(in_size, 9).arg(out_size, 9)
					.arg(time_swscale, 6)
					.arg(time_fallback, 6).arg(100 * time_fallback / time_swscale, 3)
					.arg(time_ssse3, 6).arg(100 * time_ssse3 / time_fallback, 3));
}
/* Benchmark same-size pixel-format conversion: libswscale vs. the
 * supplied fallback converter vs. the supplied SSSE3 converter,
 * logging per-frame times in microseconds and relative percentages. */
void BenchmarkConvert(unsigned int w, unsigned int h, PixelFormat in_format, PixelFormat out_format, const QString& in_format_name, const QString& out_format_name, NewImageFunc in_image, NewImageFunc out_image, ConvertFunc fallback
#if SSR_USE_X86_ASM
, ConvertFunc ssse3
#endif
) {
	std::mt19937 rng(12345); // fixed seed for reproducible image contents
#if SSR_USE_X86_ASM
	bool use_ssse3 = (CPUFeatures::HasMMX() && CPUFeatures::HasSSE() && CPUFeatures::HasSSE2() && CPUFeatures::HasSSE3() && CPUFeatures::HasSSSE3());
#endif

	// the queue needs to use enough memory to make sure that the CPU cache is flushed
	unsigned int pixels = w * h;
	unsigned int queue_size = 1 + 20000000 / pixels;
	unsigned int run_size = queue_size * 20;

	// create queue
	std::vector<std::unique_ptr<ImageGeneric> > queue_in(queue_size);
	std::vector<std::unique_ptr<ImageGeneric> > queue_out(queue_size);
	for(unsigned int i = 0; i < queue_size; ++i) {
		queue_in[i] = in_image(w, h, rng);
		queue_out[i] = out_image(w, h, rng);
	}

	// run test
	unsigned int time_swscale = 0, time_fallback = 0, time_ssse3 = 0;
	{
		// swscale pass (runs half as many iterations as the others)
		SwsContext *sws = sws_getCachedContext(NULL,
											   w, h, in_format,
											   w, h, out_format,
											   SWS_BILINEAR, NULL, NULL, NULL);
		if(sws == NULL) {
			Logger::LogError("[BenchmarkScale] " + Logger::tr("Error: Can't get swscale context!", "Don't translate 'swscale'"));
			throw LibavException();
		}
		sws_setColorspaceDetails(sws, sws_getCoefficients(SWS_CS_ITU709), 0, sws_getCoefficients(SWS_CS_DEFAULT), 0, 0, 1 << 16, 1 << 16);
		int64_t t1 = hrt_time_micro();
		for(unsigned int i = 0; i < run_size / 2; ++i) {
			unsigned int ii = i % queue_size; // cycle through queue to defeat the cache
			sws_scale(sws, queue_in[ii]->m_data.data(), queue_in[ii]->m_stride.data(), 0, h, queue_out[ii]->m_data.data(), queue_out[ii]->m_stride.data());
		}
		int64_t t2 = hrt_time_micro();
		time_swscale = (t2 - t1) / (run_size / 2);
	}
	{
		// fallback (plain C++) pass
		int64_t t1 = hrt_time_micro();
		for(unsigned int i = 0; i < run_size; ++i) {
			unsigned int ii = i % queue_size;
			fallback(w, h, queue_in[ii]->m_data[0], queue_in[ii]->m_stride[0], queue_out[ii]->m_data.data(), queue_out[ii]->m_stride.data());
		}
		int64_t t2 = hrt_time_micro();
		time_fallback = (t2 - t1) / run_size;
	}
#if SSR_USE_X86_ASM
	if(use_ssse3) {
		// SSSE3 pass (only when the CPU supports it)
		int64_t t1 = hrt_time_micro();
		for(unsigned int i = 0; i < run_size; ++i) {
			unsigned int ii = i % queue_size;
			ssse3(w, h, queue_in[ii]->m_data[0], queue_in[ii]->m_stride[0], queue_out[ii]->m_data.data(), queue_out[ii]->m_stride.data());
		}
		int64_t t2 = hrt_time_micro();
		time_ssse3 = (t2 - t1) / run_size;
	}
#endif

	// print result
	QString size = QString("%1x%2").arg(w).arg(h);
	Logger::LogInfo("[BenchmarkConvert] " + Logger::tr("%1 %2 to %3 %4 | SWScale %5 us | Fallback %6 us (%7%) | SSSE3 %8 us (%9%)")
					.arg(in_format_name, 6).arg(size, 9).arg(out_format_name, 6).arg(size, 9)
					.arg(time_swscale, 6)
					.arg(time_fallback, 6).arg(100 * time_fallback / time_swscale, 3)
					.arg(time_ssse3, 6).arg(100 * time_ssse3 / time_fallback, 3));
}
/* Open anim->name with FFmpeg, locate the requested video stream
 * (anim->streamindex-th video stream), open its decoder, and set up the
 * frames and swscale context used for RGBA conversion. Fills in the
 * anim fields (dimensions, duration, frame rate, decoder state).
 * Returns 0 on success, -1 on any failure (all partially acquired
 * resources are released before returning). */
static int startffmpeg(struct anim *anim)
{
	int i, videoStream;

	AVCodec *pCodec;
	AVFormatContext *pFormatCtx = NULL;
	AVCodecContext *pCodecCtx;
	int frs_num;
	double frs_den;
	int streamcount;

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
	/* The following for color space determination */
	int srcRange, dstRange, brightness, contrast, saturation;
	int *table;
	const int *inv_table;
#endif

	if (anim == 0) return(-1);

	streamcount = anim->streamindex;

	if (avformat_open_input(&pFormatCtx, anim->name, NULL, NULL) != 0) {
		return -1;
	}

	if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	av_dump_format(pFormatCtx, 0, anim->name, 0);

	/* Find the video stream */
	videoStream = -1;

	/* skip streamcount video streams to reach the requested one */
	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			if (streamcount > 0) {
				streamcount--;
				continue;
			}
			videoStream = i;
			break;
		}

	if (videoStream == -1) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	pCodecCtx = pFormatCtx->streams[videoStream]->codec;

	/* Find the decoder for the video stream */
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	pCodecCtx->workaround_bugs = 1;

	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	/* duration in frames = container duration * frame rate */
	anim->duration = ceil(pFormatCtx->duration * av_q2d(pFormatCtx->streams[videoStream]->r_frame_rate) / AV_TIME_BASE);

	frs_num = pFormatCtx->streams[videoStream]->r_frame_rate.num;
	frs_den = pFormatCtx->streams[videoStream]->r_frame_rate.den;

	frs_den *= AV_TIME_BASE;

	/* reduce the fraction while the numerator stays divisible by 10 */
	while (frs_num % 10 == 0 && frs_den >= 2.0 && frs_num > 10) {
		frs_num /= 10;
		frs_den /= 10;
	}

	anim->frs_sec = frs_num;
	anim->frs_sec_base = frs_den;

	anim->params = 0;

	anim->x = pCodecCtx->width;
	anim->y = av_get_cropped_height_from_codec(pCodecCtx);

	anim->pFormatCtx = pFormatCtx;
	anim->pCodecCtx = pCodecCtx;
	anim->pCodec = pCodec;
	anim->videoStream = videoStream;

	anim->interlacing = 0;
	anim->orientation = 0;
	anim->framesize = anim->x * anim->y * 4; /* RGBA: 4 bytes/pixel */

	anim->curposition = -1;
	anim->last_frame = 0;
	anim->last_pts = -1;
	anim->next_pts = -1;
	anim->next_packet.stream_index = -1;

	anim->pFrame = avcodec_alloc_frame();
	anim->pFrameComplete = FALSE;
	anim->pFrameDeinterlaced = avcodec_alloc_frame();
	anim->pFrameRGB = avcodec_alloc_frame();

	/* sanity check: downstream code assumes 4 bytes per RGBA pixel */
	if (avpicture_get_size(PIX_FMT_RGBA, anim->x, anim->y) != anim->x * anim->y * 4) {
		fprintf(stderr, "ffmpeg has changed alloc scheme ... ARGHHH!\n");
		avcodec_close(anim->pCodecCtx);
		av_close_input_file(anim->pFormatCtx);
		av_free(anim->pFrameRGB);
		av_free(anim->pFrameDeinterlaced);
		av_free(anim->pFrame);
		anim->pCodecCtx = NULL;
		return -1;
	}

	if (anim->ib_flags & IB_animdeinterlace) {
		avpicture_fill((AVPicture *) anim->pFrameDeinterlaced,
		               MEM_callocN(avpicture_get_size(
		                               anim->pCodecCtx->pix_fmt,
		                               anim->pCodecCtx->width,
		                               anim->pCodecCtx->height),
		                           "ffmpeg deinterlace"),
		               anim->pCodecCtx->pix_fmt,
		               anim->pCodecCtx->width,
		               anim->pCodecCtx->height);
	}

	if (pCodecCtx->has_b_frames) {
		anim->preseek = 25; /* FIXME: detect gopsize ... */
	}
	else {
		anim->preseek = 0;
	}

	anim->img_convert_ctx = sws_getContext(
	        anim->x,
	        anim->y,
	        anim->pCodecCtx->pix_fmt,
	        anim->x,
	        anim->y,
	        PIX_FMT_RGBA,
	        SWS_FAST_BILINEAR | SWS_PRINT_INFO | SWS_FULL_CHR_H_INT,
	        NULL, NULL, NULL);

	if (!anim->img_convert_ctx) {
		fprintf(stderr, "Can't transform color space??? Bailing out...\n");
		avcodec_close(anim->pCodecCtx);
		av_close_input_file(anim->pFormatCtx);
		av_free(anim->pFrameRGB);
		av_free(anim->pFrameDeinterlaced);
		av_free(anim->pFrame);
		anim->pCodecCtx = NULL;
		return -1;
	}

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
	/* Try do detect if input has 0-255 YCbCR range (JFIF Jpeg MotionJpeg) */
	if (!sws_getColorspaceDetails(anim->img_convert_ctx, (int **)&inv_table, &srcRange,
	                              &table, &dstRange, &brightness, &contrast, &saturation))
	{
		srcRange = srcRange || anim->pCodecCtx->color_range == AVCOL_RANGE_JPEG;
		inv_table = sws_getCoefficients(anim->pCodecCtx->colorspace);

		if (sws_setColorspaceDetails(anim->img_convert_ctx, (int *)inv_table, srcRange,
		                             table, dstRange, brightness, contrast, saturation))
		{
			fprintf(stderr, "Warning: Could not set libswscale colorspace details.\n");
		}
	}
	else {
		fprintf(stderr, "Warning: Could not set libswscale colorspace details.\n");
	}
#endif

	return (0);
}
/* Fetch frame `position` from an open AVI as an ImBuf. On Windows the
 * VfW stream interface is used when available; otherwise the frame is
 * read via AVI_read_frame and flipped vertically into the ImBuf.
 * Returns NULL on failure. */
static ImBuf *avi_fetchibuf(struct anim *anim, int position)
{
	ImBuf *ibuf = NULL;
	int *tmp;
	int y;

	if (anim == NULL) {
		return NULL;
	}

#if defined(_WIN32) && !defined(FREE_WINDOWS)
	if (anim->avistreams) {
		LPBITMAPINFOHEADER lpbi;

		if (anim->pgf) {
			lpbi = AVIStreamGetFrame(anim->pgf, position + AVIStreamStart(anim->pavi[anim->firstvideo]));
			if (lpbi) {
				ibuf = IMB_ibImageFromMemory((unsigned char *) lpbi, 100, IB_rect, anim->colorspace, "<avi_fetchibuf>"); //Oh brother...
			}
		}
	}
	else {
#else
	if (1) {
#endif
		ibuf = IMB_allocImBuf(anim->x, anim->y, 24, IB_rect);

		tmp = AVI_read_frame(anim->avi, AVI_FORMAT_RGB32, position,
		                     AVI_get_stream(anim->avi, AVIST_VIDEO, 0));

		if (tmp == NULL) {
			printf("Error reading frame from AVI: '%s'\n", anim->name);
			IMB_freeImBuf(ibuf);
			return NULL;
		}

		/* copy rows bottom-up to flip the image vertically */
		for (y = 0; y < anim->y; y++) {
			memcpy(&(ibuf->rect)[((anim->y - y) - 1) * anim->x], &tmp[y * anim->x], anim->x * 4);
		}

		MEM_freeN(tmp);
	}

	ibuf->rect_colorspace = colormanage_colorspace_get_named(anim->colorspace);

	return ibuf;
}
#endif  /* WITH_AVI */

#ifdef WITH_FFMPEG
/* Open anim->name with FFmpeg, locate the requested video stream,
 * open its decoder, and set up the frames and swscale context used for
 * RGBA conversion. Returns 0 on success, -1 on failure (partially
 * acquired resources are released before returning). */
static int startffmpeg(struct anim *anim)
{
	int i, videoStream;

	AVCodec *pCodec;
	AVFormatContext *pFormatCtx = NULL;
	AVCodecContext *pCodecCtx;
	int frs_num;
	double frs_den;
	int streamcount;

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
	/* The following for color space determination */
	int srcRange, dstRange, brightness, contrast, saturation;
	int *table;
	const int *inv_table;
#endif

	if (anim == 0) return(-1);

	streamcount = anim->streamindex;

	if (avformat_open_input(&pFormatCtx, anim->name, NULL, NULL) != 0) {
		return -1;
	}

	if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	av_dump_format(pFormatCtx, 0, anim->name, 0);

	/* Find the video stream */
	videoStream = -1;

	/* skip streamcount video streams to reach the requested one */
	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			if (streamcount > 0) {
				streamcount--;
				continue;
			}
			videoStream = i;
			break;
		}

	if (videoStream == -1) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	pCodecCtx = pFormatCtx->streams[videoStream]->codec;

	/* Find the decoder for the video stream */
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	pCodecCtx->workaround_bugs = 1;

	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	/* duration in frames = container duration * frame rate */
	anim->duration = ceil(pFormatCtx->duration * av_q2d(pFormatCtx->streams[videoStream]->r_frame_rate) / AV_TIME_BASE);

	frs_num = pFormatCtx->streams[videoStream]->r_frame_rate.num;
	frs_den = pFormatCtx->streams[videoStream]->r_frame_rate.den;

	frs_den *= AV_TIME_BASE;

	/* reduce the fraction while the numerator stays divisible by 10 */
	while (frs_num % 10 == 0 && frs_den >= 2.0 && frs_num > 10) {
		frs_num /= 10;
		frs_den /= 10;
	}

	anim->frs_sec = frs_num;
	anim->frs_sec_base = frs_den;

	anim->params = 0;

	anim->x = pCodecCtx->width;
	anim->y = av_get_cropped_height_from_codec(pCodecCtx);

	anim->pFormatCtx = pFormatCtx;
	anim->pCodecCtx = pCodecCtx;
	anim->pCodec = pCodec;
	anim->videoStream = videoStream;

	anim->interlacing = 0;
	anim->orientation = 0;
	anim->framesize = anim->x * anim->y * 4; /* RGBA: 4 bytes/pixel */

	anim->curposition = -1;
	anim->last_frame = 0;
	anim->last_pts = -1;
	anim->next_pts = -1;
	anim->next_packet.stream_index = -1;

	anim->pFrame = avcodec_alloc_frame();
	anim->pFrameComplete = FALSE;
	anim->pFrameDeinterlaced = avcodec_alloc_frame();
	anim->pFrameRGB = avcodec_alloc_frame();

	/* sanity check: downstream code assumes 4 bytes per RGBA pixel */
	if (avpicture_get_size(PIX_FMT_RGBA, anim->x, anim->y) != anim->x * anim->y * 4) {
		fprintf(stderr, "ffmpeg has changed alloc scheme ... ARGHHH!\n");
		avcodec_close(anim->pCodecCtx);
		av_close_input_file(anim->pFormatCtx);
		av_free(anim->pFrameRGB);
		av_free(anim->pFrameDeinterlaced);
		av_free(anim->pFrame);
		anim->pCodecCtx = NULL;
		return -1;
	}

	if (anim->ib_flags & IB_animdeinterlace) {
		avpicture_fill((AVPicture *) anim->pFrameDeinterlaced,
		               MEM_callocN(avpicture_get_size(
		                               anim->pCodecCtx->pix_fmt,
		                               anim->pCodecCtx->width,
		                               anim->pCodecCtx->height),
		                           "ffmpeg deinterlace"),
		               anim->pCodecCtx->pix_fmt,
		               anim->pCodecCtx->width,
		               anim->pCodecCtx->height);
	}

	if (pCodecCtx->has_b_frames) {
		anim->preseek = 25; /* FIXME: detect gopsize ... */
	}
	else {
		anim->preseek = 0;
	}

	anim->img_convert_ctx = sws_getContext(
	        anim->x,
	        anim->y,
	        anim->pCodecCtx->pix_fmt,
	        anim->x,
	        anim->y,
	        PIX_FMT_RGBA,
	        SWS_FAST_BILINEAR | SWS_PRINT_INFO | SWS_FULL_CHR_H_INT,
	        NULL, NULL, NULL);

	if (!anim->img_convert_ctx) {
		fprintf(stderr, "Can't transform color space??? Bailing out...\n");
		avcodec_close(anim->pCodecCtx);
		av_close_input_file(anim->pFormatCtx);
		av_free(anim->pFrameRGB);
		av_free(anim->pFrameDeinterlaced);
		av_free(anim->pFrame);
		anim->pCodecCtx = NULL;
		return -1;
	}

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
	/* Try do detect if input has 0-255 YCbCR range (JFIF Jpeg MotionJpeg) */
	if (!sws_getColorspaceDetails(anim->img_convert_ctx, (int **)&inv_table, &srcRange,
	                              &table, &dstRange, &brightness, &contrast, &saturation))
	{
		srcRange = srcRange || anim->pCodecCtx->color_range == AVCOL_RANGE_JPEG;
		inv_table = sws_getCoefficients(anim->pCodecCtx->colorspace);

		if (sws_setColorspaceDetails(anim->img_convert_ctx, (int *)inv_table, srcRange,
		                             table, dstRange, brightness, contrast, saturation))
		{
			fprintf(stderr, "Warning: Could not set libswscale colorspace details.\n");
		}
	}
	else {
		fprintf(stderr, "Warning: Could not set libswscale colorspace details.\n");
	}
#endif

	return (0);
}