Example #1
static int write_skip_frames(AVFormatContext *s, int stream_index, int64_t dts)
{
    AVIStream *avist    = s->streams[stream_index]->priv_data;
    AVCodecContext *enc = s->streams[stream_index]->codec;

    av_dlog(s, "dts:%s packet_count:%d stream_index:%d\n", av_ts2str(dts), avist->packet_count, stream_index);
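    /* For streams without a fixed block_align, pad any gap between the packet DTS
     * and the number of packets written so far with empty packets, so the AVI
     * frame count stays in step with the timestamps. */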
    while (enc->block_align == 0 && dts != AV_NOPTS_VALUE &&
           dts > avist->packet_count && enc->codec_id != AV_CODEC_ID_XSUB && avist->packet_count) {
        AVPacket empty_packet;

        if (dts - avist->packet_count > 60000) {
            av_log(s, AV_LOG_ERROR, "Too large number of skipped frames %"PRId64" > 60000\n", dts - avist->packet_count);
            return AVERROR(EINVAL);
        }

        av_init_packet(&empty_packet);
        empty_packet.size         = 0;
        empty_packet.data         = NULL;
        empty_packet.stream_index = stream_index;
        avi_write_packet(s, &empty_packet);
        av_dlog(s, "dup dts:%s packet_count:%d\n", av_ts2str(dts), avist->packet_count);
    }

    return 0;
}
Example #2
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
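    /* av_ts2str() prints the raw timestamp value (or "NOPTS"),
     * av_ts2timestr() converts it to seconds using the stream's time base. */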
    printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           tag,
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}
Example #3
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    BBoxContext *bbox = ctx->priv;
    AVFilterBufferRef *picref = inlink->cur_buf;
    FFBoundingBox box;
    int has_bbox, w, h;

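    /* Find the bounding box of the non-black area of the luma plane,
     * using 16 as the luminance threshold. */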
    has_bbox =
        ff_calculate_bounding_box(&box,
                                  picref->data[0], picref->linesize[0],
                                  inlink->w, inlink->h, 16);
    w = box.x2 - box.x1 + 1;
    h = box.y2 - box.y1 + 1;

    av_log(ctx, AV_LOG_INFO,
           "n:%d pts:%s pts_time:%s", bbox->frame,
           av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base));

    if (has_bbox) {
        av_log(ctx, AV_LOG_INFO,
               "x1:%d x2:%d y1:%d y2:%d w:%d h:%d"
               " crop=%d:%d:%d:%d drawbox=%d:%d:%d:%d",
               box.x1, box.x2, box.y1, box.y2, w, h,
               w, h, box.x1, box.y1,    /* crop params */
               box.x1, box.y1, w, h);   /* drawbox params */
    }
    av_log(ctx, AV_LOG_INFO, "\n");

    bbox->frame++;
    avfilter_end_frame(inlink->dst->outputs[0]);
}
Example #4
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AShowInfoContext *s  = ctx->priv;
    char chlayout_str[128];
    uint32_t checksum = 0;
    int channels    = av_get_channel_layout_nb_channels(buf->channel_layout);
    int planar      = av_sample_fmt_is_planar(buf->format);
    int block_align = av_get_bytes_per_sample(buf->format) * (planar ? 1 : channels);
    int data_size   = buf->nb_samples * block_align;
    int planes      = planar ? channels : 1;
    int i;
    void *tmp_ptr = av_realloc(s->plane_checksums, channels * sizeof(*s->plane_checksums));

    if (!tmp_ptr)
        return AVERROR(ENOMEM);
    s->plane_checksums = tmp_ptr;

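    /* One Adler-32 checksum per plane (planar audio has one plane per channel);
     * "checksum" chains all planes together into a single global value. */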
    for (i = 0; i < planes; i++) {
        uint8_t *data = buf->extended_data[i];

        s->plane_checksums[i] = av_adler32_update(0, data, data_size);
        checksum = i ? av_adler32_update(checksum, data, data_size) :
                       s->plane_checksums[0];
    }

    av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), -1,
                                 buf->channel_layout);

    av_log(ctx, AV_LOG_INFO,
           "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
           "fmt:%s channels:%d chlayout:%s rate:%d nb_samples:%d "
           "checksum:%08X ",
           inlink->frame_count,
           av_ts2str(buf->pts), av_ts2timestr(buf->pts, &inlink->time_base),
           av_frame_get_pkt_pos(buf),
           av_get_sample_fmt_name(buf->format), av_frame_get_channels(buf), chlayout_str,
           buf->sample_rate, buf->nb_samples,
           checksum);

    av_log(ctx, AV_LOG_INFO, "plane_checksums: [ ");
    for (i = 0; i < planes; i++)
        av_log(ctx, AV_LOG_INFO, "%08X ", s->plane_checksums[i]);
    av_log(ctx, AV_LOG_INFO, "]\n");

    return ff_filter_frame(inlink->dst->outputs[0], buf);
}
Example #5
static int end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ShowInfoContext *showinfo = ctx->priv;
    AVFilterBufferRef *picref = inlink->cur_buf;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    uint32_t plane_checksum[4] = {0}, checksum = 0;
    int i, plane, vsub = desc->log2_chroma_h;

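    /* Checksum each picture plane; chroma planes (1 and 2) use the
     * vertically sub-sampled height. */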
    for (plane = 0; plane < 4 && picref->data[plane]; plane++) {
        int64_t linesize = av_image_get_linesize(picref->format, picref->video->w, plane);
        uint8_t *data = picref->data[plane];
        int h = plane == 1 || plane == 2 ? inlink->h >> vsub : inlink->h;

        if (linesize < 0)
            return linesize;

        for (i = 0; i < h; i++) {
            plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize);
            checksum = av_adler32_update(checksum, data, linesize);
            data += picref->linesize[plane];
        }
    }

    av_log(ctx, AV_LOG_INFO,
           "n:%d pts:%s pts_time:%s pos:%"PRId64" "
           "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
           "checksum:%08X plane_checksum:[%08X",
           showinfo->frame,
           av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base), picref->pos,
           desc->name,
           picref->video->sample_aspect_ratio.num, picref->video->sample_aspect_ratio.den,
           picref->video->w, picref->video->h,
           !picref->video->interlaced     ? 'P' :         /* Progressive  */
           picref->video->top_field_first ? 'T' : 'B',    /* Top / Bottom */
           picref->video->key_frame,
           av_get_picture_type_char(picref->video->pict_type),
           checksum, plane_checksum[0]);

    for (plane = 1; plane < 4 && picref->data[plane]; plane++)
        av_log(ctx, AV_LOG_INFO, " %08X", plane_checksum[plane]);
    av_log(ctx, AV_LOG_INFO, "]\n");

    showinfo->frame++;
    return ff_end_frame(inlink->dst->outputs[0]);
}
Example #6
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
{
    AVFilterContext *ctx = inlink->dst;
    ShowInfoContext *showinfo = ctx->priv;
    uint32_t plane_checksum[8] = {0}, checksum = 0;
    char chlayout_str[128];
    int plane;
    int linesize =
        samplesref->audio->nb_samples *
        av_get_bytes_per_sample(samplesref->format);
    if (!av_sample_fmt_is_planar(samplesref->format))
        linesize *= av_get_channel_layout_nb_channels(samplesref->audio->channel_layout);

    for (plane = 0; plane < 8 && samplesref->data[plane]; plane++) {
        uint8_t *data = samplesref->data[plane];

        plane_checksum[plane] = av_adler32_update(plane_checksum[plane],
                                                  data, linesize);
        checksum = av_adler32_update(checksum, data, linesize);
    }

    av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), -1,
                                 samplesref->audio->channel_layout);

    av_log(ctx, AV_LOG_INFO,
           "n:%d pts:%s pts_time:%s pos:%"PRId64" "
           "fmt:%s chlayout:%s nb_samples:%d rate:%d "
           "checksum:%08X plane_checksum[%08X",
           showinfo->frame,
           av_ts2str(samplesref->pts), av_ts2timestr(samplesref->pts, &inlink->time_base),
           samplesref->pos,
           av_get_sample_fmt_name(samplesref->format),
           chlayout_str,
           samplesref->audio->nb_samples,
           samplesref->audio->sample_rate,
           checksum,
           plane_checksum[0]);

    for (plane = 1; plane < 8 && samplesref->data[plane]; plane++)
        av_log(ctx, AV_LOG_INFO, " %08X", plane_checksum[plane]);
    av_log(ctx, AV_LOG_INFO, "]\n");

    showinfo->frame++;
    ff_filter_samples(inlink->dst->outputs[0], samplesref);
}
Example #7
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    uint32_t plane_checksum[4] = {0}, checksum = 0;
    int i, plane, vsub = desc->log2_chroma_h;

    for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
        int64_t linesize = av_image_get_linesize(frame->format, frame->width, plane);
        uint8_t *data = frame->data[plane];
        int h = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;

        if (linesize < 0)
            return linesize;

        for (i = 0; i < h; i++) {
            plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize);
            checksum = av_adler32_update(checksum, data, linesize);
            data += frame->linesize[plane];
        }
    }

    av_log(ctx, AV_LOG_INFO,
           "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
           "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
           "checksum:%08X plane_checksum:[%08X",
           inlink->frame_count,
           av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base), av_frame_get_pkt_pos(frame),
           desc->name,
           frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den,
           frame->width, frame->height,
           !frame->interlaced_frame ? 'P' :         /* Progressive  */
           frame->top_field_first   ? 'T' : 'B',    /* Top / Bottom */
           frame->key_frame,
           av_get_picture_type_char(frame->pict_type),
           checksum, plane_checksum[0]);

    for (plane = 1; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++)
        av_log(ctx, AV_LOG_INFO, " %08X", plane_checksum[plane]);
    av_log(ctx, AV_LOG_INFO, "]\n");

    return ff_filter_frame(inlink->dst->outputs[0], frame);
}
Example #8
// TODO: document metadata
static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
{
    AVFilterContext *ctx = inlink->dst;
    BlackDetectContext *blackdetect = ctx->priv;
    double picture_black_ratio = 0;
    const uint8_t *p = picref->data[0];
    int x, i;

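    /* Count the luma samples that are at or below the black threshold. */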
    for (i = 0; i < inlink->h; i++) {
        for (x = 0; x < inlink->w; x++)
            blackdetect->nb_black_pixels += p[x] <= blackdetect->pixel_black_th_i;
        p += picref->linesize[0];
    }

    picture_black_ratio = (double)blackdetect->nb_black_pixels / (inlink->w * inlink->h);

    av_log(ctx, AV_LOG_DEBUG,
           "frame:%"PRId64" picture_black_ratio:%f pts:%s t:%s type:%c\n",
           inlink->frame_count_out, picture_black_ratio,
           av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base),
           av_get_picture_type_char(picref->pict_type));

    if (picture_black_ratio >= blackdetect->picture_black_ratio_th) {
        if (!blackdetect->black_started) {
            /* black starts here */
            blackdetect->black_started = 1;
            blackdetect->black_start = picref->pts;
            av_dict_set(&picref->metadata, "lavfi.black_start",
                av_ts2timestr(blackdetect->black_start, &inlink->time_base), 0);
        }
    } else if (blackdetect->black_started) {
        /* black ends here */
        blackdetect->black_started = 0;
        blackdetect->black_end = picref->pts;
        check_black_end(ctx);
        av_dict_set(&picref->metadata, "lavfi.black_end",
            av_ts2timestr(blackdetect->black_end, &inlink->time_base), 0);
    }

    blackdetect->last_picref_pts = picref->pts;
    blackdetect->nb_black_pixels = 0;
    return ff_filter_frame(inlink->dst->outputs[0], picref);
}
Example #9
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    BBoxContext *bbox = ctx->priv;
    FFBoundingBox box;
    int has_bbox, w, h;

    has_bbox =
        ff_calculate_bounding_box(&box,
                                  frame->data[0], frame->linesize[0],
                                  inlink->w, inlink->h, bbox->min_val);
    w = box.x2 - box.x1 + 1;
    h = box.y2 - box.y1 + 1;

    av_log(ctx, AV_LOG_INFO,
           "n:%"PRId64" pts:%s pts_time:%s", inlink->frame_count_out,
           av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));

    if (has_bbox) {
        AVDictionary **metadata = &frame->metadata;

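        /* Export the detected box as lavfi.bbox.* frame metadata
         * (SET_META is a helper macro defined elsewhere in this filter). */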
        SET_META("lavfi.bbox.x1", box.x1)
        SET_META("lavfi.bbox.x2", box.x2)
        SET_META("lavfi.bbox.y1", box.y1)
        SET_META("lavfi.bbox.y2", box.y2)
        SET_META("lavfi.bbox.w",  w)
        SET_META("lavfi.bbox.h",  h)

        av_log(ctx, AV_LOG_INFO,
               " x1:%d x2:%d y1:%d y2:%d w:%d h:%d"
               " crop=%d:%d:%d:%d drawbox=%d:%d:%d:%d",
               box.x1, box.x2, box.y1, box.y2, w, h,
               w, h, box.x1, box.y1,    /* crop params */
               box.x1, box.y1, w, h);   /* drawbox params */
    }
    av_log(ctx, AV_LOG_INFO, "\n");

    return ff_filter_frame(inlink->dst->outputs[0], frame);
}
Example #10
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AShowInfoContext *s  = ctx->priv;
    char chlayout_str[128];
    uint32_t checksum = 0;
    int channels    = inlink->channels;
    int planar      = av_sample_fmt_is_planar(buf->format);
    int block_align = av_get_bytes_per_sample(buf->format) * (planar ? 1 : channels);
    int data_size   = buf->nb_samples * block_align;
    int planes      = planar ? channels : 1;
    int i;
    void *tmp_ptr = av_realloc_array(s->plane_checksums, channels, sizeof(*s->plane_checksums));

    if (!tmp_ptr)
        return AVERROR(ENOMEM);
    s->plane_checksums = tmp_ptr;

    for (i = 0; i < planes; i++) {
        uint8_t *data = buf->extended_data[i];

        s->plane_checksums[i] = av_adler32_update(0, data, data_size);
        checksum = i ? av_adler32_update(checksum, data, data_size) :
                       s->plane_checksums[0];
    }

    av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), -1,
                                 buf->channel_layout);

    av_log(ctx, AV_LOG_INFO,
           "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
           "fmt:%s channels:%d chlayout:%s rate:%d nb_samples:%d "
           "checksum:%08"PRIX32" ",
           inlink->frame_count_out,
           av_ts2str(buf->pts), av_ts2timestr(buf->pts, &inlink->time_base),
           av_frame_get_pkt_pos(buf),
           av_get_sample_fmt_name(buf->format), av_frame_get_channels(buf), chlayout_str,
           buf->sample_rate, buf->nb_samples,
           checksum);

    av_log(ctx, AV_LOG_INFO, "plane_checksums: [ ");
    for (i = 0; i < planes; i++)
        av_log(ctx, AV_LOG_INFO, "%08"PRIX32" ", s->plane_checksums[i]);
    av_log(ctx, AV_LOG_INFO, "]\n");

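    /* Dump any per-frame side data attached to the audio frame. */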
    for (i = 0; i < buf->nb_side_data; i++) {
        AVFrameSideData *sd = buf->side_data[i];

        av_log(ctx, AV_LOG_INFO, "  side data - ");
        switch (sd->type) {
        case AV_FRAME_DATA_MATRIXENCODING: dump_matrixenc (ctx, sd); break;
        case AV_FRAME_DATA_DOWNMIX_INFO:   dump_downmix   (ctx, sd); break;
        case AV_FRAME_DATA_REPLAYGAIN:     dump_replaygain(ctx, sd); break;
        case AV_FRAME_DATA_AUDIO_SERVICE_TYPE: dump_audio_service_type(ctx, sd); break;
        default:                           dump_unknown   (ctx, sd); break;
        }

        av_log(ctx, AV_LOG_INFO, "\n");
    }

    return ff_filter_frame(inlink->dst->outputs[0], buf);
}
Example #11
static int avi_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIContext *avi = s->priv_data;
    AVIOContext *pb = s->pb;
    unsigned char tag[5];
    unsigned int flags=0;
    const int stream_index= pkt->stream_index;
    AVIStream *avist= s->streams[stream_index]->priv_data;
    AVCodecContext *enc= s->streams[stream_index]->codec;
    int size= pkt->size;

    av_dlog(s, "dts:%s packet_count:%d stream_index:%d\n", av_ts2str(pkt->dts), avist->packet_count, stream_index);
    while(enc->block_align==0 && pkt->dts != AV_NOPTS_VALUE && pkt->dts > avist->packet_count && enc->codec_id != AV_CODEC_ID_XSUB && avist->packet_count){
        AVPacket empty_packet;

        if(pkt->dts - avist->packet_count > 60000){
            av_log(s, AV_LOG_ERROR, "Too large number of skipped frames %"PRId64" > 60000\n", pkt->dts - avist->packet_count);
            return AVERROR(EINVAL);
        }

        av_init_packet(&empty_packet);
        empty_packet.size= 0;
        empty_packet.data= NULL;
        empty_packet.stream_index= stream_index;
        avi_write_packet(s, &empty_packet);
        av_dlog(s, "dup dts:%s packet_count:%d\n", av_ts2str(pkt->dts), avist->packet_count);
    }
    avist->packet_count++;

    // Make sure to put an OpenDML chunk when the file size exceeds the limits
    if (pb->seekable &&
        (avio_tell(pb) - avi->riff_start > AVI_MAX_RIFF_SIZE)) {

        avi_write_ix(s);
        ff_end_tag(pb, avi->movi_list);

        if (avi->riff_id == 1)
            avi_write_idx1(s);

        ff_end_tag(pb, avi->riff_start);
        avi->movi_list = avi_start_new_riff(s, pb, "AVIX", "movi");
    }

    avi_stream2fourcc(tag, stream_index, enc->codec_type);
    if(pkt->flags&AV_PKT_FLAG_KEY)
        flags = 0x10;
    if (enc->codec_type == AVMEDIA_TYPE_AUDIO) {
       avist->audio_strm_length += size;
    }

    if (s->pb->seekable) {
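        /* Record flags, position and size of each chunk in an in-memory index,
         * grown in fixed-size clusters, so the idx1/indx chunks can be written later. */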
        AVIIndex* idx = &avist->indexes;
        int cl = idx->entry / AVI_INDEX_CLUSTER_SIZE;
        int id = idx->entry % AVI_INDEX_CLUSTER_SIZE;
        if (idx->ents_allocated <= idx->entry) {
            idx->cluster = av_realloc_f(idx->cluster, sizeof(void*), cl+1);
            if (!idx->cluster) {
                idx->ents_allocated = 0;
                idx->entry = 0;
                return AVERROR(ENOMEM);
            }
            idx->cluster[cl] = av_malloc(AVI_INDEX_CLUSTER_SIZE*sizeof(AVIIentry));
            if (!idx->cluster[cl])
                return AVERROR(ENOMEM);
            idx->ents_allocated += AVI_INDEX_CLUSTER_SIZE;
        }

        idx->cluster[cl][id].flags = flags;
        idx->cluster[cl][id].pos = avio_tell(pb) - avi->movi_list;
        idx->cluster[cl][id].len = size;
        idx->entry++;
    }

    avio_write(pb, tag, 4);
    avio_wl32(pb, size);
    avio_write(pb, pkt->data, size);
    if (size & 1)
        avio_w8(pb, 0);

    return 0;
}
Example #12
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    MetadataContext *s = ctx->priv;
    AVDictionary *metadata = av_frame_get_metadata(frame);
    AVDictionaryEntry *e;

    if (!metadata)
        return ff_filter_frame(outlink, frame);

    e = av_dict_get(metadata, !s->key ? "" : s->key, NULL,
                    !s->key ? AV_DICT_IGNORE_SUFFIX: 0);

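    /* Dispatch on the configured mode: select/drop frames, add, modify,
     * print or delete the metadata entry. */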
    switch (s->mode) {
    case METADATA_SELECT:
        if (!s->value && e && e->value) {
            return ff_filter_frame(outlink, frame);
        } else if (s->value && e && e->value &&
                   s->compare(s, e->value, s->value)) {
            return ff_filter_frame(outlink, frame);
        }
        break;
    case METADATA_ADD:
        if (!e || !e->value)
            av_dict_set(&metadata, s->key, s->value, 0);
        return ff_filter_frame(outlink, frame);
        break;
    case METADATA_MODIFY:
        if (e && e->value) {
            av_dict_set(&metadata, s->key, s->value, 0);
        }
        return ff_filter_frame(outlink, frame);
        break;
    case METADATA_PRINT:
        if (!s->key && e) {
            s->print(ctx, "frame:%-4"PRId64" pts:%-7s pts_time:%-7s\n",
                     inlink->frame_count, av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
            s->print(ctx, "%s=%s\n", e->key, e->value);
            while ((e = av_dict_get(metadata, "", e, AV_DICT_IGNORE_SUFFIX)) != NULL) {
                s->print(ctx, "%s=%s\n", e->key, e->value);
            }
        } else if (e && e->value && (!s->value || (e->value && s->compare(s, e->value, s->value)))) {
            s->print(ctx, "frame:%-4"PRId64" pts:%-7s pts_time:%-7s\n",
                     inlink->frame_count, av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
            s->print(ctx, "%s=%s\n", s->key, e->value);
        }
        return ff_filter_frame(outlink, frame);
        break;
    case METADATA_DELETE:
        if (e && e->value && s->value && s->compare(s, e->value, s->value)) {
            av_dict_set(&metadata, s->key, NULL, 0);
        } else if (e && e->value) {
            av_dict_set(&metadata, s->key, NULL, 0);
        }
        return ff_filter_frame(outlink, frame);
        break;
    default:
        av_assert0(0);
    };

    av_frame_free(&frame);

    return 0;
}
Example #13
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    uint32_t plane_checksum[4] = {0}, checksum = 0;
    int64_t sum[4] = {0}, sum2[4] = {0};
    int32_t pixelcount[4] = {0};
    int i, plane, vsub = desc->log2_chroma_h;
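    /* When IDE_COMPILE is defined, the av_ts2str()/av_ts2timestr() convenience
     * macros (which rely on C99 compound literals) are avoided in favour of
     * explicit buffers passed to av_ts_make_string()/av_ts_make_time_string(). */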
#ifdef IDE_COMPILE
	char tmp1[32] = {0};
	char tmp2[32] = {0};
#endif

    for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
        int64_t linesize = av_image_get_linesize(frame->format, frame->width, plane);
        uint8_t *data = frame->data[plane];
        int h = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;

        if (linesize < 0)
            return linesize;

        for (i = 0; i < h; i++) {
            plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize);
            checksum = av_adler32_update(checksum, data, linesize);

            update_sample_stats(data, linesize, sum+plane, sum2+plane);
            pixelcount[plane] += linesize;
            data += frame->linesize[plane];
        }
    }

#ifdef IDE_COMPILE
	av_log(ctx, AV_LOG_INFO,
           "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
           "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
           "checksum:%08"PRIX32" plane_checksum:[%08"PRIX32,
           inlink->frame_count,
           av_ts_make_string(tmp1, frame->pts), av_ts_make_time_string(tmp2, frame->pts, &inlink->time_base), av_frame_get_pkt_pos(frame),
           desc->name,
           frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den,
           frame->width, frame->height,
           !frame->interlaced_frame ? 'P' :         /* Progressive  */
           frame->top_field_first   ? 'T' : 'B',    /* Top / Bottom */
           frame->key_frame,
           av_get_picture_type_char(frame->pict_type),
           checksum, plane_checksum[0]);
#else
	av_log(ctx, AV_LOG_INFO,
           "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
           "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
           "checksum:%08"PRIX32" plane_checksum:[%08"PRIX32,
           inlink->frame_count,
           av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base), av_frame_get_pkt_pos(frame),
           desc->name,
           frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den,
           frame->width, frame->height,
           !frame->interlaced_frame ? 'P' :         /* Progressive  */
           frame->top_field_first   ? 'T' : 'B',    /* Top / Bottom */
           frame->key_frame,
           av_get_picture_type_char(frame->pict_type),
           checksum, plane_checksum[0]);
#endif

    for (plane = 1; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++)
        av_log(ctx, AV_LOG_INFO, " %08"PRIX32, plane_checksum[plane]);
    av_log(ctx, AV_LOG_INFO, "] mean:[");
    for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++)
        av_log(ctx, AV_LOG_INFO, "%"PRId64" ", (sum[plane] + pixelcount[plane]/2) / pixelcount[plane]);
    av_log(ctx, AV_LOG_INFO, "\b] stdev:[");
    for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++)
        av_log(ctx, AV_LOG_INFO, "%3.1f ",
               sqrt((sum2[plane] - sum[plane]*(double)sum[plane]/pixelcount[plane])/pixelcount[plane]));
    av_log(ctx, AV_LOG_INFO, "\b]\n");

    for (i = 0; i < frame->nb_side_data; i++) {
        AVFrameSideData *sd = frame->side_data[i];

        av_log(ctx, AV_LOG_INFO, "  side data - ");
        switch (sd->type) {
        case AV_FRAME_DATA_PANSCAN:
            av_log(ctx, AV_LOG_INFO, "pan/scan");
            break;
        case AV_FRAME_DATA_A53_CC:
            av_log(ctx, AV_LOG_INFO, "A/53 closed captions (%d bytes)", sd->size);
            break;
        case AV_FRAME_DATA_STEREO3D:
            dump_stereo3d(ctx, sd);
            break;
        case AV_FRAME_DATA_DISPLAYMATRIX:
            av_log(ctx, AV_LOG_INFO, "displaymatrix: rotation of %.2f degrees",
                   av_display_rotation_get((int32_t *)sd->data));
            break;
        case AV_FRAME_DATA_AFD:
            av_log(ctx, AV_LOG_INFO, "afd: value of %"PRIu8, sd->data[0]);
            break;
        default:
            av_log(ctx, AV_LOG_WARNING, "unknown side data type %d (%d bytes)",
                   sd->type, sd->size);
            break;
        }

        av_log(ctx, AV_LOG_INFO, "\n");
    }

    return ff_filter_frame(inlink->dst->outputs[0], frame);
}
Example #14
/*
 * Return
 * - 0 -- one packet was read and processed
 * - AVERROR(EAGAIN) -- no packets were available for selected file,
 *   this function should be called again
 * - AVERROR_EOF -- this function should not be called again
 */
static int process_input(int file_index)
{
    InputFile *ifile = input_files[file_index];
    AVFormatContext *is;
    InputStream *ist;
    AVPacket pkt;
    int ret, i, j;

    is  = ifile->ctx;
    // ****<<  Capture a frame: audio/video/subtitle
    ret = get_input_packet(ifile, &pkt); // ****<<  Call stack: av_read_frame() --> read_frame_internal() --> ff_read_packet() --> s->iformat->read_packet(s, pkt); --> dshow_read_frame();

    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    if (ret < 0) {
        if (ret != AVERROR_EOF) {
            print_error(is->filename, ret);
            if (exit_on_error)
                exit_program(1);
        }
        ifile->eof_reached = 1;

        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            if (ist->decoding_needed)
                output_packet(ist, NULL);

            /* mark all outputs that don't go through lavfi as finished */
            for (j = 0; j < nb_output_streams; j++) {
                OutputStream *ost = output_streams[j];

                if (ost->source_index == ifile->ist_index + i &&
                        (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                    close_output_stream(ost);
            }
        }

        return AVERROR(EAGAIN);
    }

    reset_eagain();

    if (do_pkt_dump) {
        av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
                         is->streams[pkt.stream_index]);
    }
    /* the following test is needed in case new streams appear
       dynamically in the stream: we ignore them */
    if (pkt.stream_index >= ifile->nb_streams) {
        report_new_stream(file_index, &pkt);
        goto discard_packet;
    }

    ist = input_streams[ifile->ist_index + pkt.stream_index];

    ist->data_size += pkt.size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
               "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
               av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64) {
        int64_t stime, stime2;
        // Correcting starttime based on the enabled streams
        // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
        //       so we instead do it here as part of discontinuity handling
        if (   ist->next_dts == AV_NOPTS_VALUE
                && ifile->ts_offset == -is->start_time
                && (is->iformat->flags & AVFMT_TS_DISCONT)) {
            int64_t new_start_time = INT64_MAX;
            for (i=0; i<is->nb_streams; i++) {
                AVStream *st = is->streams[i];
                if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
                    continue;
                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
            }
            if (new_start_time > is->start_time) {
                av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
                ifile->ts_offset = -new_start_time;
            }
        }

        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
        ist->wrap_correction_done = 1;

        if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
        if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
    }

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1)
        if (ist->st->nb_side_data)
            av_packet_split_side_data(&pkt);
    for (i = 0; i < ist->st->nb_side_data; i++) {
        AVPacketSideData *src_sd = &ist->st->side_data[i];
        uint8_t *dst_data;

        if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
            continue;

        dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
        if (!dst_data)
            exit_program(1);

        memcpy(dst_data, src_sd->data, src_sd->size);
    }

    // ****<<  pkt.pts:   capture timestamp (in milliseconds; usually the system time)
    // ****<<  ts_offset: starting-time offset (in milliseconds; usually the negative of the first frame's capture timestamp)
    // ****<<  final pts: relative timestamp (in milliseconds; elapsed time since the first frame)
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts *= ist->ts_scale;
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts *= ist->ts_scale;

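    /* For formats flagged AVFMT_TS_DISCONT, fold large DTS jumps relative to the
     * last seen timestamp into ts_offset so downstream timestamps stay continuous. */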
    if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
            && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
        int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
        int64_t delta   = pkt_dts - ifile->last_ts;
        if(delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                (delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
                 ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)) {
            ifile->ts_offset -= delta;
            av_log(NULL, AV_LOG_DEBUG,
                   "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                   delta, ifile->ts_offset);
            pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }

    if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
            !copy_ts) {
        int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
        int64_t delta   = pkt_dts - ist->next_dts;
        if (is->iformat->flags & AVFMT_TS_DISCONT) {
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                    (delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
                     ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE) ||
                    pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset -= delta;
                av_log(NULL, AV_LOG_DEBUG,
                       "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                       delta, ifile->ts_offset);
                pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        } else {
            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                    (delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)) {
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
                pkt.dts = AV_NOPTS_VALUE;
            }
            if (pkt.pts != AV_NOPTS_VALUE) {
                int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta   = pkt_pts - ist->next_dts;
                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                        (delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)) {
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
                    pkt.pts = AV_NOPTS_VALUE;
                }
            }
        }
    }

    if (pkt.dts != AV_NOPTS_VALUE)
        ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    sub2video_heartbeat(ist, pkt.pts);

    ret = output_packet(ist, &pkt); // ****<<  see output_packet.c
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
               ist->file_index, ist->st->index, av_err2str(ret));
        if (exit_on_error)
            exit_program(1);
    }

discard_packet:
    av_free_packet(&pkt);

    return 0;
}
Example #15
int main(int argc, char **argv)
{
    int opt, ret, stream, flags;
    const char *filename;
    AVFormatContext *avf = NULL;
    int64_t min_ts, max_ts, ts;
    AVPacket packet;

    while ((opt = getopt(argc, argv, "h")) != -1) {
        switch (opt) {
        case 'h':
            usage(0);
        default:
            usage(1);
        }
    }
    argc -= optind;
    argv += optind;
    if (!argc)
        usage(1);
    filename = *argv;
    argv++;
    argc--;

    av_register_all();
    if ((ret = avformat_open_input(&avf, filename, NULL, NULL)) < 0) {
        fprintf(stderr, "%s: %s\n", filename, av_err2str(ret));
        return 1;
    }
    if ((ret = avformat_find_stream_info(avf, NULL)) < 0) {
        fprintf(stderr, "%s: could not find codec parameters: %s\n", filename,
                av_err2str(ret));
        return 1;
    }

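    /* Remaining arguments are commands: "read" pulls and prints one packet,
     * "seek:stream:min_ts:ts:max_ts:flags" calls avformat_seek_file(). */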
    for (; argc; argc--, argv++) {
        if (!strcmp(*argv, "read")) {
            ret = av_read_frame(avf, &packet);
            if (ret < 0) {
                printf("read: %d (%s)\n", ret, av_err2str(ret));
            } else {
                AVRational *tb = &avf->streams[packet.stream_index]->time_base;
                printf("read: %d size=%d stream=%d dts=%s (%s) pts=%s (%s)\n",
                       ret, packet.size, packet.stream_index,
                       av_ts2str(packet.dts), av_ts2timestr(packet.dts, tb),
                       av_ts2str(packet.pts), av_ts2timestr(packet.pts, tb));
                av_packet_unref(&packet);
            }
        } else if (sscanf(*argv, "seek:%i:%"SCNi64":%"SCNi64":%"SCNi64":%i",
                   &stream, &min_ts, &ts, &max_ts, &flags) == 5) {
            ret = avformat_seek_file(avf, stream, min_ts, ts, max_ts, flags);
            printf("seek: %d (%s)\n", ret, av_err2str(ret));
        } else {
            fprintf(stderr, "'%s': unknown command\n", *argv);
            return 1;
        }
    }

    avformat_close_input(&avf);

    return 0;
}
Example #16
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    uint32_t plane_checksum[4] = {0}, checksum = 0;
    int i, plane, vsub = desc->log2_chroma_h;

    for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
        int64_t linesize = av_image_get_linesize(frame->format, frame->width, plane);
        uint8_t *data = frame->data[plane];
        int h = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;

        if (linesize < 0)
            return linesize;

        for (i = 0; i < h; i++) {
            plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize);
            checksum = av_adler32_update(checksum, data, linesize);
            data += frame->linesize[plane];
        }
    }

    av_log(ctx, AV_LOG_INFO,
           "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
           "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
           "checksum:%08"PRIX32" plane_checksum:[%08"PRIX32,
           inlink->frame_count,
           av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base), av_frame_get_pkt_pos(frame),
           desc->name,
           frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den,
           frame->width, frame->height,
           !frame->interlaced_frame ? 'P' :         /* Progressive  */
           frame->top_field_first   ? 'T' : 'B',    /* Top / Bottom */
           frame->key_frame,
           av_get_picture_type_char(frame->pict_type),
           checksum, plane_checksum[0]);

    for (plane = 1; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++)
        av_log(ctx, AV_LOG_INFO, " %08"PRIX32, plane_checksum[plane]);
    av_log(ctx, AV_LOG_INFO, "]\n");

    for (i = 0; i < frame->nb_side_data; i++) {
        AVFrameSideData *sd = frame->side_data[i];

        av_log(ctx, AV_LOG_INFO, "  side data - ");
        switch (sd->type) {
        case AV_FRAME_DATA_PANSCAN:
            av_log(ctx, AV_LOG_INFO, "pan/scan");
            break;
        case AV_FRAME_DATA_A53_CC:
            av_log(ctx, AV_LOG_INFO, "A/53 closed captions (%d bytes)", sd->size);
            break;
        case AV_FRAME_DATA_STEREO3D:
            dump_stereo3d(ctx, sd);
            break;
        default:
            av_log(ctx, AV_LOG_WARNING, "unknown side data type %d (%d bytes)",
                   sd->type, sd->size);
            break;
        }

        av_log(ctx, AV_LOG_INFO, "\n");
    }

    return ff_filter_frame(inlink->dst->outputs[0], frame);
}