Example #1
static void show_stream(AVFormatContext *fmt_ctx, int stream_idx)
{
    AVStream *stream = fmt_ctx->streams[stream_idx];
    AVCodecContext *dec_ctx;
    const AVCodec *dec;
    const char *profile;
    char val_str[128];
    AVRational display_aspect_ratio, *sar = NULL;
    const AVPixFmtDescriptor *desc;

    probe_object_header("stream");

    probe_int("index", stream->index);

    if ((dec_ctx = stream->codec)) {
        if ((dec = dec_ctx->codec)) {
            probe_str("codec_name", dec->name);
            probe_str("codec_long_name", dec->long_name);
        } else {
            probe_str("codec_name", "unknown");
        }

        probe_str("codec_type", media_type_string(dec_ctx->codec_type));
        probe_str("codec_time_base",
                  rational_string(val_str, sizeof(val_str),
                                  "/", &dec_ctx->time_base));

        /* print AVI/FourCC tag */
        av_get_codec_tag_string(val_str, sizeof(val_str), dec_ctx->codec_tag);
        probe_str("codec_tag_string", val_str);
        probe_str("codec_tag", tag_string(val_str, sizeof(val_str),
                                          dec_ctx->codec_tag));

        /* print profile, if there is one */
        if (dec && (profile = av_get_profile_name(dec, dec_ctx->profile)))
            probe_str("profile", profile);

        switch (dec_ctx->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            probe_int("width", dec_ctx->width);
            probe_int("height", dec_ctx->height);
            probe_int("coded_width", dec_ctx->coded_width);
            probe_int("coded_height", dec_ctx->coded_height);
            probe_int("has_b_frames", dec_ctx->has_b_frames);
            if (dec_ctx->sample_aspect_ratio.num)
                sar = &dec_ctx->sample_aspect_ratio;
            else if (stream->sample_aspect_ratio.num)
                sar = &stream->sample_aspect_ratio;

            if (sar) {
                probe_str("sample_aspect_ratio",
                          rational_string(val_str, sizeof(val_str), ":", sar));
                av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
                          dec_ctx->width  * sar->num, dec_ctx->height * sar->den,
                          1024*1024);
                probe_str("display_aspect_ratio",
                          rational_string(val_str, sizeof(val_str), ":",
                          &display_aspect_ratio));
            }
            desc = av_pix_fmt_desc_get(dec_ctx->pix_fmt);
            probe_str("pix_fmt", desc ? desc->name : "unknown");
            probe_int("level", dec_ctx->level);

            probe_str("color_range", av_color_range_name(dec_ctx->color_range));
            probe_str("color_space", av_color_space_name(dec_ctx->colorspace));
            probe_str("color_trc", av_color_transfer_name(dec_ctx->color_trc));
            probe_str("color_pri", av_color_primaries_name(dec_ctx->color_primaries));
            probe_str("chroma_loc", av_chroma_location_name(dec_ctx->chroma_sample_location));
            break;

        case AVMEDIA_TYPE_AUDIO:
            probe_str("sample_rate",
                      value_string(val_str, sizeof(val_str),
                                   dec_ctx->sample_rate,
                                   unit_hertz_str));
            probe_int("channels", dec_ctx->channels);
            probe_int("bits_per_sample",
                      av_get_bits_per_sample(dec_ctx->codec_id));
            break;
        }
    } else {
        probe_str("codec_type", "unknown");
    }

    if (fmt_ctx->iformat->flags & AVFMT_SHOW_IDS)
        probe_int("id", stream->id);
    probe_str("avg_frame_rate",
              rational_string(val_str, sizeof(val_str), "/",
              &stream->avg_frame_rate));
    if (dec_ctx && dec_ctx->bit_rate)
        probe_str("bit_rate",
                  value_string(val_str, sizeof(val_str),
                               dec_ctx->bit_rate, unit_bit_per_second_str));
    probe_str("time_base",
              rational_string(val_str, sizeof(val_str), "/",
              &stream->time_base));
    probe_str("start_time",
              time_value_string(val_str, sizeof(val_str),
                                stream->start_time, &stream->time_base));
    probe_str("duration",
              time_value_string(val_str, sizeof(val_str),
                                stream->duration, &stream->time_base));
    if (stream->nb_frames)
        probe_int("nb_frames", stream->nb_frames);

    probe_dict(stream->metadata, "tags");

    if (stream->nb_side_data) {
        int i, j;
        probe_object_header("sidedata");
        for (i = 0; i < stream->nb_side_data; i++) {
            const AVPacketSideData* sd = &stream->side_data[i];
            switch (sd->type) {
            case AV_PKT_DATA_DISPLAYMATRIX:
                probe_object_header("displaymatrix");
                probe_array_header("matrix", 1);
                for (j = 0; j < 9; j++)
                    probe_int(NULL, ((int32_t *)sd->data)[j]);
                probe_array_footer("matrix", 1);
                probe_int("rotation",
                          av_display_rotation_get((int32_t *)sd->data));
                probe_object_footer("displaymatrix");
                break;
            }
        }
        probe_object_footer("sidedata");
    }

    probe_object_footer("stream");
}
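
For context, a minimal sketch of a driver that could call show_stream() above: it opens an input with libavformat, reads the stream headers, and iterates over the streams. The probe_* helpers and show_stream() itself are assumed to be the ones from the example; probe_file() is a hypothetical wrapper, and error handling is kept minimal.

#include <libavformat/avformat.h>

static int probe_file(const char *filename)
{
    AVFormatContext *fmt_ctx = NULL;
    unsigned int i;
    int ret;

    /* open the input and read the container header */
    if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0)
        return ret;

    /* fill in stream parameters that the header alone does not provide */
    if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        avformat_close_input(&fmt_ctx);
        return ret;
    }

    /* dump every stream with the show_stream() function from the example */
    for (i = 0; i < fmt_ctx->nb_streams; i++)
        show_stream(fmt_ctx, i);

    avformat_close_input(&fmt_ctx);
    return 0;
}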
Example #2
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
    TonemapContext *s = link->dst->priv;
    AVFilterLink *outlink = link->dst->outputs[0];
    AVFrame *out;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
    const AVPixFmtDescriptor *odesc = av_pix_fmt_desc_get(outlink->format);
    int ret, x, y;
    double peak = s->peak;

    if (!desc || !odesc) {
        av_frame_free(&in);
        return AVERROR_BUG;
    }

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }

    ret = av_frame_copy_props(out, in);
    if (ret < 0) {
        av_frame_free(&in);
        av_frame_free(&out);
        return ret;
    }

    /* input and output transfer will be linear */
    if (in->color_trc == AVCOL_TRC_UNSPECIFIED) {
        av_log(s, AV_LOG_WARNING, "Untagged transfer, assuming linear light\n");
        out->color_trc = AVCOL_TRC_LINEAR;
    } else if (in->color_trc != AVCOL_TRC_LINEAR)
        av_log(s, AV_LOG_WARNING, "Tonemapping works on linear light only\n");

    /* read peak from side data if not passed in */
    if (!peak) {
        peak = ff_determine_signal_peak(in);
        av_log(s, AV_LOG_DEBUG, "Computed signal peak: %f\n", peak);
    }

    /* load original color space even if pixel format is RGB to compute overbrights */
    s->coeffs = &luma_coefficients[in->colorspace];
    if (s->desat > 0 && (in->colorspace == AVCOL_SPC_UNSPECIFIED || !s->coeffs)) {
        if (in->colorspace == AVCOL_SPC_UNSPECIFIED)
            av_log(s, AV_LOG_WARNING, "Missing color space information, ");
        else if (!s->coeffs)
            av_log(s, AV_LOG_WARNING, "Unsupported color space '%s', ",
                   av_color_space_name(in->colorspace));
        av_log(s, AV_LOG_WARNING, "desaturation is disabled\n");
        s->desat = 0;
    }

    /* do the tone map */
    for (y = 0; y < out->height; y++)
        for (x = 0; x < out->width; x++)
            tonemap(s, out, in, desc, x, y, peak);

    /* copy/generate alpha if needed */
    if (desc->flags & AV_PIX_FMT_FLAG_ALPHA && odesc->flags & AV_PIX_FMT_FLAG_ALPHA) {
        av_image_copy_plane(out->data[3], out->linesize[3],
                            in->data[3], in->linesize[3],
                            out->linesize[3], outlink->h);
    } else if (odesc->flags & AV_PIX_FMT_FLAG_ALPHA) {
        for (y = 0; y < out->height; y++) {
            for (x = 0; x < out->width; x++) {
                AV_WN32(out->data[3] + x * odesc->comp[3].step + y * out->linesize[3],
                        av_float2int(1.0f));
            }
        }
    }

    av_frame_free(&in);

    ff_update_hdr_metadata(out, peak);

    return ff_filter_frame(outlink, out);
}
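
Both examples query pixel format properties through av_pix_fmt_desc_get(). The short sketch below is a standalone illustration rather than part of either example: it prints a few fields of the returned AVPixFmtDescriptor, including the AV_PIX_FMT_FLAG_ALPHA flag that filter_frame() tests before handling the alpha plane. Compile and link against libavutil.

#include <stdio.h>
#include <libavutil/pixdesc.h>

int main(void)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(AV_PIX_FMT_YUVA420P);

    if (!desc)
        return 1;

    /* name and number of components of the pixel format */
    printf("name: %s, components: %d\n", desc->name, desc->nb_components);

    /* the same alpha flag tested in filter_frame() above */
    printf("has alpha: %s\n",
           (desc->flags & AV_PIX_FMT_FLAG_ALPHA) ? "yes" : "no");

    /* chroma subsampling factors, as log2 values */
    printf("log2 chroma w/h: %d/%d\n", desc->log2_chroma_w, desc->log2_chroma_h);

    return 0;
}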