AVFrame* CFFmpegImage::ExtractFrame()
{
  if (!m_fctx || !m_fctx->streams[0])
  {
    CLog::LogFunction(LOGERROR, __FUNCTION__, "No valid format context or stream");
    return nullptr;
  }

  AVPacket pkt;
  AVFrame* frame = av_frame_alloc();
  int frame_decoded = 0;
  int ret = 0;

  ret = av_read_frame(m_fctx, &pkt);
  if (ret < 0)
  {
    CLog::Log(LOGDEBUG, "Error [%d] while reading frame: %s\n", ret, strerror(AVERROR(ret)));
    av_frame_free(&frame);
    av_packet_unref(&pkt);
    return nullptr;
  }

  ret = DecodeFFmpegFrame(m_codec_ctx, frame, &frame_decoded, &pkt);
  if (ret < 0 || frame_decoded == 0 || !frame)
  {
    CLog::Log(LOGDEBUG, "Error [%d] while decoding frame: %s\n", ret, strerror(AVERROR(ret)));
    av_frame_free(&frame);
    av_packet_unref(&pkt);
    return nullptr;
  }

  // we need milliseconds
  av_frame_set_pkt_duration(frame, av_rescale_q(frame->pkt_duration, m_fctx->streams[0]->time_base, AVRational{ 1, 1000 }));

  m_height = frame->height;
  m_width = frame->width;
  m_originalWidth = m_width;
  m_originalHeight = m_height;

  const AVPixFmtDescriptor* pixDescriptor = av_pix_fmt_desc_get(static_cast<AVPixelFormat>(frame->format));
  if (pixDescriptor && ((pixDescriptor->flags & (AV_PIX_FMT_FLAG_ALPHA | AV_PIX_FMT_FLAG_PAL)) != 0))
    m_hasAlpha = true;

  AVDictionary* dic = av_frame_get_metadata(frame);
  AVDictionaryEntry* entry = NULL;
  if (dic)
  {
    entry = av_dict_get(dic, "Orientation", NULL, AV_DICT_MATCH_CASE);
    if (entry && entry->value)
    {
      int orientation = atoi(entry->value);
      // only values from 0 to 8 inclusive are valid
      // http://sylvana.net/jpegcrop/exif_orientation.html
      if (orientation >= 0 && orientation <= 8)
        m_orientation = (unsigned int)orientation;
    }
  }

  av_packet_unref(&pkt);
  return frame;
}
static void get_metadata_from_av_frame(struct af_instance *af, AVFrame *frame)
{
#if HAVE_AVFRAME_METADATA
    struct priv *p = af->priv;
    if (!p->metadata)
        p->metadata = talloc_zero(p, struct mp_tags);

    mp_tags_copy_from_av_dictionary(p->metadata, av_frame_get_metadata(frame));
#endif
}
int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src)
{
    dst->pts    = src->pts;
    dst->pos    = av_frame_get_pkt_pos(src);
    dst->format = src->format;

    av_dict_free(&dst->metadata);
    av_dict_copy(&dst->metadata, av_frame_get_metadata(src), 0);

    switch (dst->type) {
    case AVMEDIA_TYPE_VIDEO:
        dst->video->w                   = src->width;
        dst->video->h                   = src->height;
        dst->video->sample_aspect_ratio = src->sample_aspect_ratio;
        dst->video->interlaced          = src->interlaced_frame;
        dst->video->top_field_first     = src->top_field_first;
        dst->video->key_frame           = src->key_frame;
        dst->video->pict_type           = src->pict_type;
        av_freep(&dst->video->qp_table);
        dst->video->qp_table_linesize = 0;
        if (src->qscale_table) {
            int qsize = src->qstride ? src->qstride * ((src->height + 15) / 16)
                                     : (src->width + 15) / 16;
            dst->video->qp_table = av_malloc(qsize);
            if (!dst->video->qp_table)
                return AVERROR(ENOMEM);
            dst->video->qp_table_linesize = src->qstride;
            dst->video->qp_table_size     = qsize;
            memcpy(dst->video->qp_table, src->qscale_table, qsize);
        }
        break;
    case AVMEDIA_TYPE_AUDIO:
        dst->audio->sample_rate    = src->sample_rate;
        dst->audio->channel_layout = src->channel_layout;
        dst->audio->channels       = src->channels;
        if (src->channels < av_get_channel_layout_nb_channels(src->channel_layout)) {
            av_log(NULL, AV_LOG_ERROR,
                   "libavfilter does not support this channel layout\n");
            return AVERROR(EINVAL);
        }
        break;
    default:
        return AVERROR(EINVAL);
    }

    return 0;
}
static double get_concatdec_select(AVFrame *frame, int64_t pts)
{
    AVDictionary *metadata = av_frame_get_metadata(frame);
    AVDictionaryEntry *start_time_entry = av_dict_get(metadata, "lavf.concatdec.start_time", NULL, 0);
    AVDictionaryEntry *duration_entry = av_dict_get(metadata, "lavf.concatdec.duration", NULL, 0);

    if (start_time_entry) {
        int64_t start_time = strtoll(start_time_entry->value, NULL, 10);
        if (pts >= start_time) {
            if (duration_entry) {
                int64_t duration = strtoll(duration_entry->value, NULL, 10);
                if (pts < start_time + duration)
                    return -1;
                else
                    return 0;
            }
            return -1;
        }
        return 0;
    }
    return NAN;
}
bool CDVDVideoCodecFFmpeg::GetPictureCommon(DVDVideoPicture* pDvdVideoPicture)
{
  if (!m_pFrame)
    return false;

  pDvdVideoPicture->iWidth = m_pFrame->width;
  pDvdVideoPicture->iHeight = m_pFrame->height;

  /* crop of 10 pixels if demuxer asked it */
  if (m_pCodecContext->coded_width && m_pCodecContext->coded_width < (int)pDvdVideoPicture->iWidth
                                   && m_pCodecContext->coded_width > (int)pDvdVideoPicture->iWidth - 10)
    pDvdVideoPicture->iWidth = m_pCodecContext->coded_width;

  if (m_pCodecContext->coded_height && m_pCodecContext->coded_height < (int)pDvdVideoPicture->iHeight
                                    && m_pCodecContext->coded_height > (int)pDvdVideoPicture->iHeight - 10)
    pDvdVideoPicture->iHeight = m_pCodecContext->coded_height;

  double aspect_ratio;

  /* use variable in the frame */
  AVRational pixel_aspect = m_pFrame->sample_aspect_ratio;

  if (pixel_aspect.num == 0)
    aspect_ratio = 0;
  else
    aspect_ratio = av_q2d(pixel_aspect) * pDvdVideoPicture->iWidth / pDvdVideoPicture->iHeight;

  if (aspect_ratio <= 0.0)
    aspect_ratio = (float)pDvdVideoPicture->iWidth / (float)pDvdVideoPicture->iHeight;

  /* XXX: we suppose the screen has a 1.0 pixel ratio */
  // CDVDVideo will compensate it.
  pDvdVideoPicture->iDisplayHeight = pDvdVideoPicture->iHeight;
  pDvdVideoPicture->iDisplayWidth  = ((int)RINT(pDvdVideoPicture->iHeight * aspect_ratio)) & -3;
  if (pDvdVideoPicture->iDisplayWidth > pDvdVideoPicture->iWidth)
  {
    pDvdVideoPicture->iDisplayWidth  = pDvdVideoPicture->iWidth;
    pDvdVideoPicture->iDisplayHeight = ((int)RINT(pDvdVideoPicture->iWidth / aspect_ratio)) & -3;
  }

  pDvdVideoPicture->pts = DVD_NOPTS_VALUE;

  AVDictionaryEntry* entry = av_dict_get(av_frame_get_metadata(m_pFrame), "stereo_mode", NULL, 0);
  if (entry && entry->value)
  {
    strncpy(pDvdVideoPicture->stereo_mode, (const char*)entry->value, sizeof(pDvdVideoPicture->stereo_mode));
    pDvdVideoPicture->stereo_mode[sizeof(pDvdVideoPicture->stereo_mode) - 1] = '\0';
  }

  pDvdVideoPicture->iRepeatPicture = 0.5 * m_pFrame->repeat_pict;
  pDvdVideoPicture->iFlags = DVP_FLAG_ALLOCATED;
  pDvdVideoPicture->iFlags |= m_pFrame->interlaced_frame ? DVP_FLAG_INTERLACED : 0;
  pDvdVideoPicture->iFlags |= m_pFrame->top_field_first ? DVP_FLAG_TOP_FIELD_FIRST : 0;

  if (m_codecControlFlags & DVD_CODEC_CTRL_DROP)
    pDvdVideoPicture->iFlags |= DVP_FLAG_DROPPED;

  pDvdVideoPicture->chroma_position = m_pCodecContext->chroma_sample_location;
  pDvdVideoPicture->color_primaries = m_pCodecContext->color_primaries;
  pDvdVideoPicture->color_transfer = m_pCodecContext->color_trc;
  pDvdVideoPicture->color_matrix = m_pCodecContext->colorspace;

  if (m_pCodecContext->color_range == AVCOL_RANGE_JPEG ||
      m_pCodecContext->pix_fmt == AV_PIX_FMT_YUVJ420P)
    pDvdVideoPicture->color_range = 1;
  else
    pDvdVideoPicture->color_range = 0;

  int qscale_type;
  pDvdVideoPicture->qp_table = av_frame_get_qp_table(m_pFrame, &pDvdVideoPicture->qstride, &qscale_type);

  switch (qscale_type)
  {
  case FF_QSCALE_TYPE_MPEG1:
    pDvdVideoPicture->qscale_type = DVP_QSCALE_MPEG1;
    break;
  case FF_QSCALE_TYPE_MPEG2:
    pDvdVideoPicture->qscale_type = DVP_QSCALE_MPEG2;
    break;
  case FF_QSCALE_TYPE_H264:
    pDvdVideoPicture->qscale_type = DVP_QSCALE_H264;
    break;
  default:
    pDvdVideoPicture->qscale_type = DVP_QSCALE_UNKNOWN;
  }

  if (pDvdVideoPicture->iRepeatPicture)
    pDvdVideoPicture->dts = DVD_NOPTS_VALUE;
  else
    pDvdVideoPicture->dts = m_dts;

  m_dts = DVD_NOPTS_VALUE;

  int64_t bpts = av_frame_get_best_effort_timestamp(m_pFrame);
  if (bpts != AV_NOPTS_VALUE)
  {
    pDvdVideoPicture->pts = (double)bpts * DVD_TIME_BASE / AV_TIME_BASE;
    if (pDvdVideoPicture->pts == m_decoderPts)
    {
      pDvdVideoPicture->iRepeatPicture = -0.5;
      pDvdVideoPicture->pts = DVD_NOPTS_VALUE;
      pDvdVideoPicture->dts = DVD_NOPTS_VALUE;
    }
  }
  else
    pDvdVideoPicture->pts = DVD_NOPTS_VALUE;

  if (pDvdVideoPicture->pts != DVD_NOPTS_VALUE)
    m_decoderPts = pDvdVideoPicture->pts;

  if (m_requestSkipDeint)
  {
    pDvdVideoPicture->iFlags |= DVD_CODEC_CTRL_SKIPDEINT;
    m_skippedDeint++;
  }

  m_requestSkipDeint = false;
  pDvdVideoPicture->iFlags |= m_codecControlFlags;

  if (!m_started)
    pDvdVideoPicture->iFlags |= DVP_FLAG_DROPPED;

  return true;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    MetadataContext *s = ctx->priv;
    AVDictionary *metadata = av_frame_get_metadata(frame);
    AVDictionaryEntry *e;

    if (!metadata)
        return ff_filter_frame(outlink, frame);

    e = av_dict_get(metadata, !s->key ? "" : s->key, NULL,
                    !s->key ? AV_DICT_IGNORE_SUFFIX : 0);

    switch (s->mode) {
    case METADATA_SELECT:
        if (!s->value && e && e->value) {
            return ff_filter_frame(outlink, frame);
        } else if (s->value && e && e->value &&
                   s->compare(s, e->value, s->value)) {
            return ff_filter_frame(outlink, frame);
        }
        break;
    case METADATA_ADD:
        if (e && e->value) {
            ;
        } else {
            av_dict_set(&metadata, s->key, s->value, 0);
        }
        return ff_filter_frame(outlink, frame);
        break;
    case METADATA_MODIFY:
        if (e && e->value) {
            av_dict_set(&metadata, s->key, s->value, 0);
        }
        return ff_filter_frame(outlink, frame);
        break;
    case METADATA_PRINT:
        if (!s->key && e) {
            s->print(ctx, "frame:%-4"PRId64" pts:%-7s pts_time:%-7s\n",
                     inlink->frame_count, av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
            s->print(ctx, "%s=%s\n", e->key, e->value);
            while ((e = av_dict_get(metadata, "", e, AV_DICT_IGNORE_SUFFIX)) != NULL) {
                s->print(ctx, "%s=%s\n", e->key, e->value);
            }
        } else if (e && e->value && (!s->value || (e->value && s->compare(s, e->value, s->value)))) {
            s->print(ctx, "frame:%-4"PRId64" pts:%-7s pts_time:%-7s\n",
                     inlink->frame_count, av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
            s->print(ctx, "%s=%s\n", s->key, e->value);
        }
        return ff_filter_frame(outlink, frame);
        break;
    case METADATA_DELETE:
        if (e && e->value && s->value && s->compare(s, e->value, s->value)) {
            av_dict_set(&metadata, s->key, NULL, 0);
        } else if (e && e->value) {
            av_dict_set(&metadata, s->key, NULL, 0);
        }
        return ff_filter_frame(outlink, frame);
        break;
    default:
        av_assert0(0);
    };

    av_frame_free(&frame);

    return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    DrawGraphContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVDictionary *metadata;
    AVDictionaryEntry *e;
    AVFrame *out = s->out;
    int i;

    if (!s->out || s->out->width  != outlink->w ||
                   s->out->height != outlink->h) {
        av_frame_free(&s->out);
        s->out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        out = s->out;
        if (!s->out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        clear_image(s, out, outlink);
    }
    av_frame_copy_props(out, in);

    metadata = av_frame_get_metadata(in);

    for (i = 0; i < 4; i++) {
        double values[VAR_VARS_NB];
        int j, y, x, old;
        uint32_t fg, bg;
        float vf;

        e = av_dict_get(metadata, s->key[i], NULL, 0);
        if (!e || !e->value)
            continue;

        if (sscanf(e->value, "%f", &vf) != 1)
            continue;

        vf = av_clipf(vf, s->min, s->max);

        values[VAR_MIN] = s->min;
        values[VAR_MAX] = s->max;
        values[VAR_VAL] = vf;

        fg = av_expr_eval(s->fg_expr[i], values, NULL);
        bg = AV_RN32(s->bg);

        if (i == 0 && s->x >= outlink->w) {
            if (s->slide == 0 || s->slide == 1)
                s->x = 0;

            if (s->slide == 2) {
                s->x = outlink->w - 1;
                for (j = 0; j < outlink->h; j++) {
                    memmove(out->data[0] + j * out->linesize[0],
                            out->data[0] + j * out->linesize[0] + 4,
                            (outlink->w - 1) * 4);
                }
            } else if (s->slide == 0) {
                clear_image(s, out, outlink);
            }
        }

        x = s->x;
        y = (outlink->h - 1) * (1 - ((vf - s->min) / (s->max - s->min)));

        switch (s->mode) {
        case 0:
            if (i == 0 && (s->slide == 1 || s->slide == 2))
                for (j = 0; j < outlink->h; j++)
                    draw_dot(bg, x, j, out);

            old = AV_RN32(out->data[0] + y * out->linesize[0] + x * 4);
            for (j = y; j < outlink->h; j++) {
                if (old != bg &&
                    (AV_RN32(out->data[0] + j * out->linesize[0] + x * 4) != old) ||
                    AV_RN32(out->data[0] + FFMIN(j+1, outlink->h - 1) * out->linesize[0] + x * 4) != old) {
                    draw_dot(fg, x, j, out);
                    break;
                }
                draw_dot(fg, x, j, out);
            }
            break;
        case 1:
            if (i == 0 && (s->slide == 1 || s->slide == 2))
                for (j = 0; j < outlink->h; j++)
                    draw_dot(bg, x, j, out);

            draw_dot(fg, x, y, out);
            break;
        case 2:
            if (s->first) {
                s->first = 0;
                s->prev_y[i] = y;
            }

            if (i == 0 && (s->slide == 1 || s->slide == 2)) {
                for (j = 0; j < y; j++)
                    draw_dot(bg, x, j, out);
                for (j = outlink->h - 1; j > y; j--)
                    draw_dot(bg, x, j, out);
            }

            if (y <= s->prev_y[i]) {
                for (j = y; j <= s->prev_y[i]; j++)
                    draw_dot(fg, x, j, out);
            } else {
                for (j = s->prev_y[i]; j <= y; j++)
                    draw_dot(fg, x, j, out);
            }
            s->prev_y[i] = y;
            break;
        }
    }

    s->x++;

    av_frame_free(&in);
    return ff_filter_frame(outlink, av_frame_clone(s->out));
}
bool CFFmpegImage::LoadImageFromMemory(unsigned char* buffer, unsigned int bufSize,
                                       unsigned int width, unsigned int height)
{
  uint8_t* fbuffer = (uint8_t*)av_malloc(FFMPEG_FILE_BUFFER_SIZE);
  if (!fbuffer)
  {
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate FFMPEG_FILE_BUFFER_SIZE");
    return false;
  }

  MemBuffer buf;
  buf.data = buffer;
  buf.size = bufSize;
  buf.pos = 0;

  AVIOContext* ioctx = avio_alloc_context(fbuffer, FFMPEG_FILE_BUFFER_SIZE, 0, &buf,
                                          mem_file_read, NULL, mem_file_seek);
  if (!ioctx)
  {
    av_free(fbuffer);
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVIOContext");
    return false;
  }

  AVFormatContext* fctx = avformat_alloc_context();
  if (!fctx)
  {
    av_free(ioctx->buffer);
    av_free(ioctx);
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVFormatContext");
    return false;
  }

  fctx->pb = ioctx;
  ioctx->max_packet_size = FFMPEG_FILE_BUFFER_SIZE;

  // Some clients have pngs saved as jpeg or ask us for png but are jpeg
  // mythtv throws all mimetypes away and asks us with application/octet-stream
  // this is poor man's fallback to at least identify png / jpeg
  bool is_jpeg = (bufSize > 2 && buffer[0] == 0xFF && buffer[1] == 0xD8 && buffer[2] == 0xFF);
  bool is_png = (bufSize > 3 && buffer[1] == 'P' && buffer[2] == 'N' && buffer[3] == 'G');
  bool is_tiff = (bufSize > 2 && buffer[0] == 'I' && buffer[1] == 'I' && buffer[2] == '*');

  AVInputFormat* inp = nullptr;
  if (is_jpeg)
    inp = av_find_input_format("jpeg_pipe");
  else if (is_png)
    inp = av_find_input_format("png_pipe");
  else if (is_tiff)
    inp = av_find_input_format("tiff_pipe");
  else if (m_strMimeType == "image/jp2")
    inp = av_find_input_format("j2k_pipe");
  else if (m_strMimeType == "image/webp")
    inp = av_find_input_format("webp_pipe");
  // brute force parse if above check already failed
  else if (m_strMimeType == "image/jpeg" || m_strMimeType == "image/jpg")
    inp = av_find_input_format("jpeg_pipe");
  else if (m_strMimeType == "image/png")
    inp = av_find_input_format("png_pipe");
  else if (m_strMimeType == "image/tiff")
    inp = av_find_input_format("tiff_pipe");

  if (avformat_open_input(&fctx, "", inp, NULL) < 0)
  {
    CLog::Log(LOGERROR, "Could not find suitable input format: %s", m_strMimeType.c_str());
    avformat_close_input(&fctx);
    FreeIOCtx(ioctx);
    return false;
  }

  AVCodecContext* codec_ctx = fctx->streams[0]->codec;
  AVCodec* codec = avcodec_find_decoder(codec_ctx->codec_id);
  if (avcodec_open2(codec_ctx, codec, NULL) < 0)
  {
    avformat_close_input(&fctx);
    FreeIOCtx(ioctx);
    return false;
  }

  AVPacket pkt;
  AVFrame* frame = av_frame_alloc();
  av_read_frame(fctx, &pkt);
  int frame_decoded;
  int ret = avcodec_decode_video2(codec_ctx, frame, &frame_decoded, &pkt);
  if (ret < 0)
    CLog::Log(LOGDEBUG, "Error [%d] while decoding frame: %s\n", ret, strerror(AVERROR(ret)));

  if (frame_decoded != 0)
  {
    av_frame_free(&m_pFrame);
    m_pFrame = av_frame_clone(frame);

    if (m_pFrame)
    {
      m_height = m_pFrame->height;
      m_width = m_pFrame->width;
      m_originalWidth = m_width;
      m_originalHeight = m_height;

      const AVPixFmtDescriptor* pixDescriptor = av_pix_fmt_desc_get(static_cast<AVPixelFormat>(m_pFrame->format));
      if (pixDescriptor && ((pixDescriptor->flags & (AV_PIX_FMT_FLAG_ALPHA | AV_PIX_FMT_FLAG_PAL)) != 0))
        m_hasAlpha = true;

      AVDictionary* dic = av_frame_get_metadata(m_pFrame);
      AVDictionaryEntry* entry = NULL;
      if (dic)
      {
        entry = av_dict_get(dic, "Orientation", NULL, AV_DICT_MATCH_CASE);
        if (entry && entry->value)
        {
          int orientation = atoi(entry->value);
          // only values from 0 to 8 inclusive are valid
          // http://sylvana.net/jpegcrop/exif_orientation.html
          if (orientation >= 0 && orientation <= 8)
            m_orientation = (unsigned int)orientation;
        }
      }
    }
    else
    {
      CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate a picture data buffer");
      frame_decoded = 0;
    }
  }
  else
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not decode a frame");

  av_frame_free(&frame);
  av_free_packet(&pkt);
  avcodec_close(codec_ctx);
  avformat_close_input(&fctx);
  FreeIOCtx(ioctx);

  return (frame_decoded != 0);
}
KeyValueBag* MediaRaw::getMetaData()
{
  return KeyValueBagImpl::make(av_frame_get_metadata(getCtx()));
}
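Every example above follows the same core pattern: fetch the frame's metadata dictionary with av_frame_get_metadata() (an accessor for AVFrame.metadata; newer FFmpeg releases deprecate the av_frame_get_/set_ accessors in favour of reading frame->metadata directly) and then look up individual tags, or walk all of them, with av_dict_get(). The helper below is a minimal sketch of that shared pattern, not taken from any of the projects above; the function name print_frame_metadata is purely illustrative, and it assumes the caller already holds a decoded AVFrame.

#include <stdio.h>

#include <libavutil/dict.h>
#include <libavutil/frame.h>

/* Illustrative sketch only: print every metadata entry attached to a
 * decoded frame. An empty key combined with AV_DICT_IGNORE_SUFFIX makes
 * av_dict_get() iterate over all entries in the dictionary. */
static void print_frame_metadata(const AVFrame *frame)
{
    AVDictionary *metadata = av_frame_get_metadata(frame);
    AVDictionaryEntry *entry = NULL;

    if (!metadata)
        return;

    while ((entry = av_dict_get(metadata, "", entry, AV_DICT_IGNORE_SUFFIX)))
        printf("%s=%s\n", entry->key, entry->value);
}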