static void set_luma_transfer( struct SwsContext *context, int colorspace, int use_full_range ) { int *coefficients; const int *new_coefficients; int full_range; int brightness, contrast, saturation; if ( sws_getColorspaceDetails( context, &coefficients, &full_range, &coefficients, &full_range, &brightness, &contrast, &saturation ) != -1 ) { // Don't change these from defaults unless explicitly told to. if ( use_full_range >= 0 ) full_range = use_full_range; switch ( colorspace ) { case 170: case 470: case 601: case 624: new_coefficients = sws_getCoefficients( SWS_CS_ITU601 ); break; case 240: new_coefficients = sws_getCoefficients( SWS_CS_SMPTE240M ); break; case 709: new_coefficients = sws_getCoefficients( SWS_CS_ITU709 ); break; default: new_coefficients = coefficients; break; } sws_setColorspaceDetails( context, new_coefficients, full_range, new_coefficients, full_range, brightness, contrast, saturation ); } }
// Rebuild the cached swscale context when the requested output pixel format
// changes. No-op when the format is unchanged or no codec context exists yet
// for the current video stream.
void MediaEngine::updateSwsFormat(int videoPixelMode) {
#ifdef USE_FFMPEG
	// The codec context for the active stream may not have been created yet.
	auto it = m_pCodecCtxs.find(m_videoStream);
	AVCodecContext *ctx = (it == m_pCodecCtxs.end()) ? 0 : it->second;

	AVPixelFormat wantedFmt = getSwsFormat(videoPixelMode);
	if (wantedFmt == m_sws_fmt || ctx == 0)
		return;

	m_sws_fmt = wantedFmt;
	m_sws_ctx = sws_getCachedContext(
		m_sws_ctx,
		ctx->width, ctx->height, ctx->pix_fmt,
		m_desWidth, m_desHeight, (AVPixelFormat)m_sws_fmt,
		SWS_BILINEAR, NULL, NULL, NULL);

	int *inv_coefficients;
	int *coefficients;
	int srcRange, dstRange;
	int brightness, contrast, saturation;
	// Force limited (MPEG) range on both sides; skip silently if the context
	// doesn't support colorspace details.
	if (sws_getColorspaceDetails(m_sws_ctx, &inv_coefficients, &srcRange,
			&coefficients, &dstRange, &brightness, &contrast, &saturation) != -1) {
		srcRange = 0;
		dstRange = 0;
		sws_setColorspaceDetails(m_sws_ctx, inv_coefficients, srcRange,
			coefficients, dstRange, brightness, contrast, saturation);
	}
#endif
}
/**
 * Encode an in-memory RGBA surface into a JPEG or PNG thumbnail.
 *
 * The output encoder is chosen from m_strMimeType (jpeg/jpg -> MJPEG,
 * png -> PNG; anything else fails). The encoded bytes are written into an
 * internally owned buffer (m_outputBuffer) that is exposed to the caller via
 * bufferout/bufferoutSize — the caller must not free it.
 *
 * @param bufferin       source pixels; despite the XB_FMT_A8R8G8B8 name the
 *                       layout is treated as RGBA (see comment below)
 * @param width/height   surface dimensions in pixels
 * @param format         must be XB_FMT_A8R8G8B8
 * @param pitch          bytes per source row
 * @param destFile       destination path, used for log messages only here
 * @param bufferout      [out] set to the internal encoded buffer on success
 * @param bufferoutSize  [out] size of the encoded data in bytes
 * @return true on success; false on any failure (already logged)
 *
 * NOTE(review): uses deprecated FFmpeg APIs (avpicture_*, CODEC_FLAG_QSCALE,
 * avcodec_encode_video2) — kept as-is; see inline comment about mjpeg.
 * Cleanup on error paths relies on ThumbDataManagement's destructor plus
 * CleanupLocalOutputBuffer() for m_outputBuffer.
 */
bool CFFmpegImage::CreateThumbnailFromSurface(unsigned char* bufferin, unsigned int width, unsigned int height, unsigned int format, unsigned int pitch, const std::string& destFile, unsigned char* &bufferout, unsigned int &bufferoutSize)
{
  // It seems XB_FMT_A8R8G8B8 mean RGBA and not ARGB
  if (format != XB_FMT_A8R8G8B8)
  {
    CLog::Log(LOGERROR, "Supplied format: %d is not supported.", format);
    return false;
  }

  // Pick the encoder from the mime type; anything but jpeg/png is rejected.
  bool jpg_output = false;
  if (m_strMimeType == "image/jpeg" || m_strMimeType == "image/jpg")
    jpg_output = true;
  else if (m_strMimeType == "image/png")
    jpg_output = false;
  else
  {
    CLog::Log(LOGERROR, "Output Format is not supported: %s is not supported.", destFile.c_str());
    return false;
  }

  // RAII holder for codec context, frames, sws context and temp buffer.
  ThumbDataManagement tdm;

  tdm.codec = avcodec_find_encoder(jpg_output ? AV_CODEC_ID_MJPEG : AV_CODEC_ID_PNG);
  if (!tdm.codec)
  {
    CLog::Log(LOGERROR, "Your are missing a working encoder for format: %d", jpg_output ? AV_CODEC_ID_MJPEG : AV_CODEC_ID_PNG);
    return false;
  }

  tdm.avOutctx = avcodec_alloc_context3(tdm.codec);
  if (!tdm.avOutctx)
  {
    CLog::Log(LOGERROR, "Could not allocate context for thumbnail: %s", destFile.c_str());
    return false;
  }

  // Configure the encoder. MJPEG wants full-range YUV (YUVJ420P); PNG takes RGBA.
  tdm.avOutctx->height = height;
  tdm.avOutctx->width = width;
  tdm.avOutctx->time_base.num = 1;
  tdm.avOutctx->time_base.den = 1;
  tdm.avOutctx->pix_fmt = jpg_output ? AV_PIX_FMT_YUVJ420P : AV_PIX_FMT_RGBA;
  tdm.avOutctx->flags = CODEC_FLAG_QSCALE;
  // Fixed-quality encode: derive lambda limits and global quality from qmin/qmax.
  tdm.avOutctx->mb_lmin = tdm.avOutctx->qmin * FF_QP2LAMBDA;
  tdm.avOutctx->mb_lmax = tdm.avOutctx->qmax * FF_QP2LAMBDA;
  tdm.avOutctx->global_quality = tdm.avOutctx->qmin * FF_QP2LAMBDA;

  unsigned int internalBufOutSize = 0;

  int size = avpicture_get_size(tdm.avOutctx->pix_fmt, tdm.avOutctx->width, tdm.avOutctx->height);
  if (size < 0)
  {
    CLog::Log(LOGERROR, "Could not compute picture size for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }
  internalBufOutSize = (unsigned int) size;

  // Output buffer for the encoded packet; owned by this object (m_outputBuffer).
  m_outputBuffer = (uint8_t*) av_malloc(internalBufOutSize);
  if (!m_outputBuffer)
  {
    CLog::Log(LOGERROR, "Could not generate allocate memory for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  // Scratch buffer for the pixel-format-converted input frame.
  tdm.intermediateBuffer = (uint8_t*) av_malloc(internalBufOutSize);
  if (!tdm.intermediateBuffer)
  {
    CLog::Log(LOGERROR, "Could not allocate memory for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  if (avcodec_open2(tdm.avOutctx, tdm.codec, NULL) < 0)
  {
    CLog::Log(LOGERROR, "Could not open avcodec context thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  tdm.frame_input = av_frame_alloc();
  if (!tdm.frame_input)
  {
    CLog::Log(LOGERROR, "Could not allocate frame for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  // convert the RGB32 frame to AV_PIX_FMT_YUV420P - we use this later on as AV_PIX_FMT_YUVJ420P
  tdm.frame_temporary = av_frame_alloc();
  if (!tdm.frame_temporary)
  {
    CLog::Log(LOGERROR, "Could not allocate frame for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  // Point the temporary frame's planes into intermediateBuffer.
  if (avpicture_fill((AVPicture*)tdm.frame_temporary, tdm.intermediateBuffer, jpg_output ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_RGBA, width, height) < 0)
  {
    CLog::Log(LOGERROR, "Could not fill picture for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  uint8_t* src[] = { bufferin, NULL, NULL, NULL };
  int srcStride[] = { (int) pitch, 0, 0, 0};

  //input size == output size which means only pix_fmt conversion
  tdm.sws = sws_getContext(width, height, AV_PIX_FMT_RGB32, width, height, jpg_output ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_RGBA, 0, 0, 0, 0);
  if (!tdm.sws)
  {
    CLog::Log(LOGERROR, "Could not setup scaling context for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  // Setup jpeg range for sws
  if (jpg_output)
  {
    int* inv_table = nullptr;
    int* table = nullptr;
    int srcRange, dstRange, brightness, contrast, saturation;

    if (sws_getColorspaceDetails(tdm.sws, &inv_table, &srcRange, &table, &dstRange, &brightness, &contrast, &saturation) < 0)
    {
      CLog::Log(LOGERROR, "SWS_SCALE failed to get ColorSpaceDetails for thumbnail: %s", destFile.c_str());
      CleanupLocalOutputBuffer();
      return false;
    }
    dstRange = 1; // jpeg full range yuv420p output
    srcRange = 0; // full range RGB32 input
    if (sws_setColorspaceDetails(tdm.sws, inv_table, srcRange, table, dstRange, brightness, contrast, saturation) < 0)
    {
      CLog::Log(LOGERROR, "SWS_SCALE failed to set ColorSpace Details for thumbnail: %s", destFile.c_str());
      CleanupLocalOutputBuffer();
      return false;
    }
  }

  if (sws_scale(tdm.sws, src, srcStride, 0, height, tdm.frame_temporary->data, tdm.frame_temporary->linesize) < 0)
  {
    CLog::Log(LOGERROR, "SWS_SCALE failed for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  // Hand the converted planes to the input frame (no copy, shared pointers).
  tdm.frame_input->pts = 1;
  tdm.frame_input->quality = tdm.avOutctx->global_quality;
  tdm.frame_input->data[0] = (uint8_t*) tdm.frame_temporary->data[0];
  tdm.frame_input->data[1] = (uint8_t*) tdm.frame_temporary->data[1];
  tdm.frame_input->data[2] = (uint8_t*) tdm.frame_temporary->data[2];
  tdm.frame_input->height = height;
  tdm.frame_input->width = width;
  tdm.frame_input->linesize[0] = tdm.frame_temporary->linesize[0];
  tdm.frame_input->linesize[1] = tdm.frame_temporary->linesize[1];
  tdm.frame_input->linesize[2] = tdm.frame_temporary->linesize[2];
  // this is deprecated but mjpeg is not yet transitioned
  tdm.frame_input->format = jpg_output ? AV_PIX_FMT_YUVJ420P : AV_PIX_FMT_RGBA;

  int got_package = 0;
  AVPacket avpkt;
  av_init_packet(&avpkt);
  // Encode straight into our preallocated output buffer.
  avpkt.data = m_outputBuffer;
  avpkt.size = internalBufOutSize;

  if ((avcodec_encode_video2(tdm.avOutctx, &avpkt, tdm.frame_input, &got_package) < 0) || (got_package == 0))
  {
    CLog::Log(LOGERROR, "Could not encode thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  bufferoutSize = avpkt.size;
  bufferout = m_outputBuffer;

  return true;
}
bool CFFmpegImage::Decode(unsigned char * const pixels, unsigned int width, unsigned int height, unsigned int pitch, unsigned int format) { if (m_width == 0 || m_height == 0 || format != XB_FMT_A8R8G8B8) return false; if (!m_pFrame || !m_pFrame->data[0]) { CLog::LogFunction(LOGERROR, __FUNCTION__, "AVFrame member not allocated"); return false; } AVPicture* pictureRGB = static_cast<AVPicture*>(av_mallocz(sizeof(AVPicture))); if (!pictureRGB) { CLog::LogFunction(LOGERROR, __FUNCTION__, "AVPicture could not be allocated"); return false; } int size = avpicture_fill(pictureRGB, NULL, AV_PIX_FMT_RGB32, width, height); if (size < 0) { CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVPicture member with %i x %i pixes", width, height); av_free(pictureRGB); return false; } bool needsCopy = false; int pixelsSize = pitch * height; if (size == pixelsSize && (int) pitch == pictureRGB->linesize[0]) { // We can use the pixels buffer directly pictureRGB->data[0] = pixels; } else { // We need an extra buffer and copy it manually afterwards if (avpicture_alloc(pictureRGB, AV_PIX_FMT_RGB32, width, height) < 0) { CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate temp buffer of size %i bytes", size); av_free(pictureRGB); return false; } needsCopy = true; } // Especially jpeg formats are full range this we need to take care here // Input Formats like RGBA are handled correctly automatically AVColorRange range = av_frame_get_color_range(m_pFrame); AVPixelFormat pixFormat = ConvertFormats(m_pFrame); // assumption quadratic maximums e.g. 
2048x2048 float ratio = m_width / (float) m_height; unsigned int nHeight = m_originalHeight; unsigned int nWidth = m_originalWidth; if (nHeight > height) { nHeight = height; nWidth = (unsigned int) (nHeight * ratio + 0.5f); } if (nWidth > width) { nWidth = width; nHeight = (unsigned int) (nWidth / ratio + 0.5f); } struct SwsContext* context = sws_getContext(m_originalWidth, m_originalHeight, pixFormat, nWidth, nHeight, AV_PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL); if (range == AVCOL_RANGE_JPEG) { int* inv_table = nullptr; int* table = nullptr; int srcRange, dstRange, brightness, contrast, saturation; sws_getColorspaceDetails(context, &inv_table, &srcRange, &table, &dstRange, &brightness, &contrast, &saturation); srcRange = 1; sws_setColorspaceDetails(context, inv_table, srcRange, table, dstRange, brightness, contrast, saturation); } sws_scale(context, m_pFrame->data, m_pFrame->linesize, 0, m_originalHeight, pictureRGB->data, pictureRGB->linesize); sws_freeContext(context); if (needsCopy) { int minPitch = std::min((int)pitch, pictureRGB->linesize[0]); if (minPitch < 0) { CLog::LogFunction(LOGERROR, __FUNCTION__, "negative pitch or height"); av_free(pictureRGB); return false; } const unsigned char *src = pictureRGB->data[0]; unsigned char* dst = pixels; for (unsigned int y = 0; y < nHeight; y++) { memcpy(dst, src, minPitch); src += pictureRGB->linesize[0]; dst += pitch; } avpicture_free(pictureRGB); } pictureRGB->data[0] = nullptr; avpicture_free(pictureRGB); // update width and height original dimensions are kept m_height = nHeight; m_width = nWidth; return true; }
static ImBuf *avi_fetchibuf(struct anim *anim, int position) { ImBuf *ibuf = NULL; int *tmp; int y; if (anim == NULL) { return NULL; } #if defined(_WIN32) && !defined(FREE_WINDOWS) if (anim->avistreams) { LPBITMAPINFOHEADER lpbi; if (anim->pgf) { lpbi = AVIStreamGetFrame(anim->pgf, position + AVIStreamStart(anim->pavi[anim->firstvideo])); if (lpbi) { ibuf = IMB_ibImageFromMemory((unsigned char *) lpbi, 100, IB_rect, anim->colorspace, "<avi_fetchibuf>"); //Oh brother... } } } else { #else if (1) { #endif ibuf = IMB_allocImBuf(anim->x, anim->y, 24, IB_rect); tmp = AVI_read_frame(anim->avi, AVI_FORMAT_RGB32, position, AVI_get_stream(anim->avi, AVIST_VIDEO, 0)); if (tmp == NULL) { printf("Error reading frame from AVI: '%s'\n", anim->name); IMB_freeImBuf(ibuf); return NULL; } for (y = 0; y < anim->y; y++) { memcpy(&(ibuf->rect)[((anim->y - y) - 1) * anim->x], &tmp[y * anim->x], anim->x * 4); } MEM_freeN(tmp); } ibuf->rect_colorspace = colormanage_colorspace_get_named(anim->colorspace); return ibuf; } #endif /* WITH_AVI */ #ifdef WITH_FFMPEG static int startffmpeg(struct anim *anim) { int i, videoStream; AVCodec *pCodec; AVFormatContext *pFormatCtx = NULL; AVCodecContext *pCodecCtx; int frs_num; double frs_den; int streamcount; #ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT /* The following for color space determination */ int srcRange, dstRange, brightness, contrast, saturation; int *table; const int *inv_table; #endif if (anim == 0) return(-1); streamcount = anim->streamindex; if (avformat_open_input(&pFormatCtx, anim->name, NULL, NULL) != 0) { return -1; } if (avformat_find_stream_info(pFormatCtx, NULL) < 0) { av_close_input_file(pFormatCtx); return -1; } av_dump_format(pFormatCtx, 0, anim->name, 0); /* Find the video stream */ videoStream = -1; for (i = 0; i < pFormatCtx->nb_streams; i++) if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) { if (streamcount > 0) { streamcount--; continue; } videoStream = i; break; } if (videoStream == -1) { 
av_close_input_file(pFormatCtx); return -1; } pCodecCtx = pFormatCtx->streams[videoStream]->codec; /* Find the decoder for the video stream */ pCodec = avcodec_find_decoder(pCodecCtx->codec_id); if (pCodec == NULL) { av_close_input_file(pFormatCtx); return -1; } pCodecCtx->workaround_bugs = 1; if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) { av_close_input_file(pFormatCtx); return -1; } anim->duration = ceil(pFormatCtx->duration * av_q2d(pFormatCtx->streams[videoStream]->r_frame_rate) / AV_TIME_BASE); frs_num = pFormatCtx->streams[videoStream]->r_frame_rate.num; frs_den = pFormatCtx->streams[videoStream]->r_frame_rate.den; frs_den *= AV_TIME_BASE; while (frs_num % 10 == 0 && frs_den >= 2.0 && frs_num > 10) { frs_num /= 10; frs_den /= 10; } anim->frs_sec = frs_num; anim->frs_sec_base = frs_den; anim->params = 0; anim->x = pCodecCtx->width; anim->y = av_get_cropped_height_from_codec(pCodecCtx); anim->pFormatCtx = pFormatCtx; anim->pCodecCtx = pCodecCtx; anim->pCodec = pCodec; anim->videoStream = videoStream; anim->interlacing = 0; anim->orientation = 0; anim->framesize = anim->x * anim->y * 4; anim->curposition = -1; anim->last_frame = 0; anim->last_pts = -1; anim->next_pts = -1; anim->next_packet.stream_index = -1; anim->pFrame = avcodec_alloc_frame(); anim->pFrameComplete = FALSE; anim->pFrameDeinterlaced = avcodec_alloc_frame(); anim->pFrameRGB = avcodec_alloc_frame(); if (avpicture_get_size(PIX_FMT_RGBA, anim->x, anim->y) != anim->x * anim->y * 4) { fprintf(stderr, "ffmpeg has changed alloc scheme ... 
ARGHHH!\n"); avcodec_close(anim->pCodecCtx); av_close_input_file(anim->pFormatCtx); av_free(anim->pFrameRGB); av_free(anim->pFrameDeinterlaced); av_free(anim->pFrame); anim->pCodecCtx = NULL; return -1; } if (anim->ib_flags & IB_animdeinterlace) { avpicture_fill((AVPicture *) anim->pFrameDeinterlaced, MEM_callocN(avpicture_get_size( anim->pCodecCtx->pix_fmt, anim->pCodecCtx->width, anim->pCodecCtx->height), "ffmpeg deinterlace"), anim->pCodecCtx->pix_fmt, anim->pCodecCtx->width, anim->pCodecCtx->height); } if (pCodecCtx->has_b_frames) { anim->preseek = 25; /* FIXME: detect gopsize ... */ } else { anim->preseek = 0; } anim->img_convert_ctx = sws_getContext( anim->x, anim->y, anim->pCodecCtx->pix_fmt, anim->x, anim->y, PIX_FMT_RGBA, SWS_FAST_BILINEAR | SWS_PRINT_INFO | SWS_FULL_CHR_H_INT, NULL, NULL, NULL); if (!anim->img_convert_ctx) { fprintf(stderr, "Can't transform color space??? Bailing out...\n"); avcodec_close(anim->pCodecCtx); av_close_input_file(anim->pFormatCtx); av_free(anim->pFrameRGB); av_free(anim->pFrameDeinterlaced); av_free(anim->pFrame); anim->pCodecCtx = NULL; return -1; } #ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT /* Try do detect if input has 0-255 YCbCR range (JFIF Jpeg MotionJpeg) */ if (!sws_getColorspaceDetails(anim->img_convert_ctx, (int **)&inv_table, &srcRange, &table, &dstRange, &brightness, &contrast, &saturation)) { srcRange = srcRange || anim->pCodecCtx->color_range == AVCOL_RANGE_JPEG; inv_table = sws_getCoefficients(anim->pCodecCtx->colorspace); if (sws_setColorspaceDetails(anim->img_convert_ctx, (int *)inv_table, srcRange, table, dstRange, brightness, contrast, saturation)) { fprintf(stderr, "Warning: Could not set libswscale colorspace details.\n"); } } else { fprintf(stderr, "Warning: Could not set libswscale colorspace details.\n"); } #endif return (0); }
/* Open the movie referenced by anim->name with FFmpeg, pick the video stream
 * (skipping anim->streamindex matching streams), open the decoder, derive
 * duration/frame rate, allocate the decode/RGB frames, and create the
 * swscale context used for the RGBA conversion. Returns 0 on success, -1 on
 * failure (everything opened so far is closed again). */
static int startffmpeg(struct anim *anim)
{
	int i, videoStream;

	AVCodec *pCodec;
	AVFormatContext *pFormatCtx = NULL;
	AVCodecContext *pCodecCtx;
	int frs_num;
	double frs_den;
	int streamcount;

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
	/* The following for color space determination */
	int srcRange, dstRange, brightness, contrast, saturation;
	int *table;
	const int *inv_table;
#endif

	if (anim == 0) return(-1);

	streamcount = anim->streamindex;

	if (avformat_open_input(&pFormatCtx, anim->name, NULL, NULL) != 0) {
		return -1;
	}

	if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	av_dump_format(pFormatCtx, 0, anim->name, 0);

	/* Find the video stream */
	videoStream = -1;

	/* Skip 'streamindex' video streams so multi-stream files can address
	 * secondary streams. */
	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			if (streamcount > 0) {
				streamcount--;
				continue;
			}
			videoStream = i;
			break;
		}

	if (videoStream == -1) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	pCodecCtx = pFormatCtx->streams[videoStream]->codec;

	/* Find the decoder for the video stream */
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	pCodecCtx->workaround_bugs = 1;

	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	anim->duration = ceil(pFormatCtx->duration *
	                      av_q2d(pFormatCtx->streams[videoStream]->r_frame_rate) /
	                      AV_TIME_BASE);

	/* Reduce the frame-rate fraction for nicer display values. */
	frs_num = pFormatCtx->streams[videoStream]->r_frame_rate.num;
	frs_den = pFormatCtx->streams[videoStream]->r_frame_rate.den;

	frs_den *= AV_TIME_BASE;

	while (frs_num % 10 == 0 && frs_den >= 2.0 && frs_num > 10) {
		frs_num /= 10;
		frs_den /= 10;
	}

	anim->frs_sec = frs_num;
	anim->frs_sec_base = frs_den;

	anim->params = 0;

	anim->x = pCodecCtx->width;
	anim->y = av_get_cropped_height_from_codec(pCodecCtx);

	anim->pFormatCtx = pFormatCtx;
	anim->pCodecCtx = pCodecCtx;
	anim->pCodec = pCodec;
	anim->videoStream = videoStream;

	anim->interlacing = 0;
	anim->orientation = 0;
	anim->framesize = anim->x * anim->y * 4;

	anim->curposition = -1;
	anim->last_frame = 0;
	anim->last_pts = -1;
	anim->next_pts = -1;
	anim->next_packet.stream_index = -1;

	anim->pFrame = avcodec_alloc_frame();
	anim->pFrameComplete = FALSE;
	anim->pFrameDeinterlaced = avcodec_alloc_frame();
	anim->pFrameRGB = avcodec_alloc_frame();

	/* The RGBA output path assumes a packed 4-byte-per-pixel layout. */
	if (avpicture_get_size(PIX_FMT_RGBA, anim->x, anim->y) != anim->x * anim->y * 4) {
		fprintf(stderr, "ffmpeg has changed alloc scheme ... ARGHHH!\n");
		avcodec_close(anim->pCodecCtx);
		av_close_input_file(anim->pFormatCtx);
		av_free(anim->pFrameRGB);
		av_free(anim->pFrameDeinterlaced);
		av_free(anim->pFrame);
		anim->pCodecCtx = NULL;
		return -1;
	}

	if (anim->ib_flags & IB_animdeinterlace) {
		avpicture_fill((AVPicture *) anim->pFrameDeinterlaced,
		               MEM_callocN(avpicture_get_size(
		                               anim->pCodecCtx->pix_fmt,
		                               anim->pCodecCtx->width,
		                               anim->pCodecCtx->height),
		                           "ffmpeg deinterlace"),
		               anim->pCodecCtx->pix_fmt,
		               anim->pCodecCtx->width,
		               anim->pCodecCtx->height);
	}

	if (pCodecCtx->has_b_frames) {
		anim->preseek = 25; /* FIXME: detect gopsize ... */
	}
	else {
		anim->preseek = 0;
	}

	anim->img_convert_ctx = sws_getContext(
	        anim->x,
	        anim->y,
	        anim->pCodecCtx->pix_fmt,
	        anim->x,
	        anim->y,
	        PIX_FMT_RGBA,
	        SWS_FAST_BILINEAR | SWS_PRINT_INFO | SWS_FULL_CHR_H_INT,
	        NULL, NULL, NULL);

	if (!anim->img_convert_ctx) {
		fprintf(stderr, "Can't transform color space??? Bailing out...\n");
		avcodec_close(anim->pCodecCtx);
		av_close_input_file(anim->pFormatCtx);
		av_free(anim->pFrameRGB);
		av_free(anim->pFrameDeinterlaced);
		av_free(anim->pFrame);
		anim->pCodecCtx = NULL;
		return -1;
	}

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
	/* Try do detect if input has 0-255 YCbCR range (JFIF Jpeg MotionJpeg) */
	if (!sws_getColorspaceDetails(anim->img_convert_ctx, (int **)&inv_table, &srcRange,
	                              &table, &dstRange, &brightness, &contrast, &saturation))
	{
		srcRange = srcRange || anim->pCodecCtx->color_range == AVCOL_RANGE_JPEG;
		inv_table = sws_getCoefficients(anim->pCodecCtx->colorspace);

		if (sws_setColorspaceDetails(anim->img_convert_ctx, (int *)inv_table, srcRange,
		                             table, dstRange, brightness, contrast, saturation))
		{
			fprintf(stderr, "Warning: Could not set libswscale colorspace details.\n");
		}
	}
	else {
		fprintf(stderr, "Warning: Could not set libswscale colorspace details.\n");
	}
#endif

	return (0);
}
bool CFFmpegImage::DecodeFrame(AVFrame* frame, unsigned int width, unsigned int height, unsigned int pitch, unsigned char * const pixels) { if (pixels == nullptr) { CLog::Log(LOGERROR, "%s - No valid buffer pointer (nullptr) passed", __FUNCTION__); return false; } AVFrame* pictureRGB = av_frame_alloc(); if (!pictureRGB) { CLog::LogF(LOGERROR, "AVFrame could not be allocated"); return false; } // we align on 16 as the input provided by the Texture also aligns the buffer size to 16 int size = av_image_fill_arrays(pictureRGB->data, pictureRGB->linesize, NULL, AV_PIX_FMT_RGB32, width, height, 16); if (size < 0) { CLog::LogF(LOGERROR, "Could not allocate AVFrame member with %i x %i pixes", width, height); av_frame_free(&pictureRGB); return false; } bool needsCopy = false; int pixelsSize = pitch * height; bool aligned = (((uintptr_t)(const void *)(pixels)) % (32) == 0); if (!aligned) CLog::Log(LOGDEBUG, "Alignment of external buffer is not suitable for ffmpeg intrinsics - please fix your malloc"); if (aligned && size == pixelsSize && (int)pitch == pictureRGB->linesize[0]) { // We can use the pixels buffer directly pictureRGB->data[0] = pixels; } else { // We need an extra buffer and copy it manually afterwards pictureRGB->format = AV_PIX_FMT_RGB32; pictureRGB->width = width; pictureRGB->height = height; // we copy the data manually later so give a chance to intrinsics (e.g. mmx, neon) if (av_frame_get_buffer(pictureRGB, 32) < 0) { CLog::LogF(LOGERROR, "Could not allocate temp buffer of size %i bytes", size); av_frame_free(&pictureRGB); return false; } needsCopy = true; } // Especially jpeg formats are full range this we need to take care here // Input Formats like RGBA are handled correctly automatically AVColorRange range = frame->color_range; AVPixelFormat pixFormat = ConvertFormats(frame); // assumption quadratic maximums e.g. 
2048x2048 float ratio = m_width / (float)m_height; unsigned int nHeight = m_originalHeight; unsigned int nWidth = m_originalWidth; if (nHeight > height) { nHeight = height; nWidth = (unsigned int)(nHeight * ratio + 0.5f); } if (nWidth > width) { nWidth = width; nHeight = (unsigned int)(nWidth / ratio + 0.5f); } struct SwsContext* context = sws_getContext(m_originalWidth, m_originalHeight, pixFormat, nWidth, nHeight, AV_PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL); if (range == AVCOL_RANGE_JPEG) { int* inv_table = nullptr; int* table = nullptr; int srcRange, dstRange, brightness, contrast, saturation; sws_getColorspaceDetails(context, &inv_table, &srcRange, &table, &dstRange, &brightness, &contrast, &saturation); srcRange = 1; sws_setColorspaceDetails(context, inv_table, srcRange, table, dstRange, brightness, contrast, saturation); } sws_scale(context, frame->data, frame->linesize, 0, m_originalHeight, pictureRGB->data, pictureRGB->linesize); sws_freeContext(context); if (needsCopy) { int minPitch = std::min((int)pitch, pictureRGB->linesize[0]); if (minPitch < 0) { CLog::LogF(LOGERROR, "negative pitch or height"); av_frame_free(&pictureRGB); return false; } const unsigned char *src = pictureRGB->data[0]; unsigned char* dst = pixels; for (unsigned int y = 0; y < nHeight; y++) { memcpy(dst, src, minPitch); src += pictureRGB->linesize[0]; dst += pitch; } av_frame_free(&pictureRGB); } else { // we only lended the data so don't get it deleted pictureRGB->data[0] = nullptr; av_frame_free(&pictureRGB); } // update width and height original dimensions are kept m_height = nHeight; m_width = nWidth; return true; }