Code example #1
File: vf_showinfo.c Project: Amalerd/SoxPlayer
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ShowInfoContext *showinfo = ctx->priv;
    AVFilterBufferRef *picref = inlink->cur_buf;
    uint32_t plane_crc[4] = { 0 }, crc = 0;
    int plane;

    for (plane = 0; plane < 4; plane++) {
        int linesize = av_image_get_linesize(picref->format, picref->video->w, plane);
        if (linesize < 0)
            continue; /* av_image_get_linesize() returns a negative AVERROR code on failure */
        plane_crc[plane] = av_adler32_update(0,   picref->data[plane], linesize);
        crc              = av_adler32_update(crc, picref->data[plane], linesize);
    }

    av_log(ctx, AV_LOG_INFO,
           "n:%d pts:%"PRId64" pts_time:%f pos:%"PRId64" "
           "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
           "crc:%u plane_crc:[%u %u %u %u]\n",
           showinfo->frame,
           picref->pts, picref->pts * av_q2d(inlink->time_base), picref->pos,
           av_pix_fmt_descriptors[picref->format].name,
           picref->video->sample_aspect_ratio.num, picref->video->sample_aspect_ratio.den,
           picref->video->w, picref->video->h,
           !picref->video->interlaced     ? 'P' :         /* Progressive  */
           picref->video->top_field_first ? 'T' : 'B',    /* Top / Bottom */
           picref->video->key_frame,
           av_get_picture_type_char(picref->video->pict_type),
           crc, plane_crc[0], plane_crc[1], plane_crc[2], plane_crc[3]);

    showinfo->frame++;
    avfilter_end_frame(inlink->dst->outputs[0]);
}
Code example #2
File: msrle.c Project: artclarke/humble-video
static int msrle_decode_frame(AVCodecContext *avctx,
                              void *data, int *got_frame,
                              AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MsrleContext *s = avctx->priv_data;
    int istride = FFALIGN(avctx->width*avctx->bits_per_coded_sample, 32) / 8;
    int ret;

    s->buf = buf;
    s->size = buf_size;

    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
        return ret;

    if (avctx->bits_per_coded_sample > 1 && avctx->bits_per_coded_sample <= 8) {
        int size;
        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, &size);

        if (pal && size == AVPALETTE_SIZE) {
            s->frame->palette_has_changed = 1;
            memcpy(s->pal, pal, AVPALETTE_SIZE);
        } else if (pal) {
            av_log(avctx, AV_LOG_ERROR, "Palette size %d is wrong\n", size);
        }
        /* make the palette available */
        memcpy(s->frame->data[1], s->pal, AVPALETTE_SIZE);
    }

    /* FIXME how to correctly detect RLE ??? */
    if (avctx->height * istride == avpkt->size) { /* assume uncompressed */
        int linesize = av_image_get_linesize(avctx->pix_fmt, avctx->width, 0);
        uint8_t *ptr = s->frame->data[0];
        uint8_t *buf = avpkt->data + (avctx->height-1)*istride;
        int i, j;

        if (linesize < 0)
            return linesize;

        for (i = 0; i < avctx->height; i++) {
            if (avctx->bits_per_coded_sample == 4) {
                for (j = 0; j < avctx->width - 1; j += 2) {
                    ptr[j+0] = buf[j>>1] >> 4;
                    ptr[j+1] = buf[j>>1] & 0xF;
                }
                if (avctx->width & 1)
                    ptr[j+0] = buf[j>>1] >> 4;
            } else {
                memcpy(ptr, buf, linesize);
            }
            buf -= istride;
            ptr += s->frame->linesize[0];
        }
    } else {
        bytestream2_init(&s->gb, buf, buf_size);
        ff_msrle_decode(avctx, s->frame, avctx->bits_per_coded_sample, &s->gb);
    }

    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;

    *got_frame = 1;

    /* report that the buffer was completely consumed */
    return buf_size;
}
Code example #3
File: msrle.c Project: LazyZhu/rt-n56u-1
static int msrle_decode_frame(AVCodecContext *avctx,
                              void *data, int *data_size,
                              AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MsrleContext *s = avctx->priv_data;
    int istride = FFALIGN(avctx->width*avctx->bits_per_coded_sample, 32) / 8;

    s->buf = buf;
    s->size = buf_size;

    s->frame.reference = 3;
    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    if (avctx->reget_buffer(avctx, &s->frame)) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    if (avctx->bits_per_coded_sample > 1 && avctx->bits_per_coded_sample <= 8) {
        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);

        if (pal) {
            s->frame.palette_has_changed = 1;
            memcpy(s->pal, pal, AVPALETTE_SIZE);
        }

        /* make the palette available */
        memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
    }

    /* FIXME how to correctly detect RLE ??? */
    if (avctx->height * istride == avpkt->size) { /* assume uncompressed */
        int linesize = av_image_get_linesize(avctx->pix_fmt, avctx->width, 0);
        uint8_t *ptr = s->frame.data[0];
        uint8_t *buf = avpkt->data + (avctx->height-1)*istride;
        int i, j;

        if (linesize < 0)
            return linesize;

        for (i = 0; i < avctx->height; i++) {
            if (avctx->bits_per_coded_sample == 4) {
                for (j = 0; j < avctx->width - 1; j += 2) {
                    ptr[j+0] = buf[j>>1] >> 4;
                    ptr[j+1] = buf[j>>1] & 0xF;
                }
                if (avctx->width & 1)
                    ptr[j+0] = buf[j>>1] >> 4;
            } else {
                memcpy(ptr, buf, linesize);
            }
            buf -= istride;
            ptr += s->frame.linesize[0];
        }
    } else {
        ff_msrle_decode(avctx, (AVPicture*)&s->frame, avctx->bits_per_coded_sample, buf, buf_size);
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;

    /* report that the buffer was completely consumed */
    return buf_size;
}
Code example #4
/**
 * Copy picture field from src to dst.
 *
 * @param src_field copy from upper, lower field or both
 * @param interleave leave a padding line between each copied line
 * @param dst_field copy to upper or lower field,
 *        only meaningful when interleave is selected
 * @param flags context flags
 */
static inline
void copy_picture_field(TInterlaceContext *tinterlace,
                        uint8_t *dst[4], int dst_linesize[4],
                        const uint8_t *src[4], int src_linesize[4],
                        enum AVPixelFormat format, int w, int src_h,
                        int src_field, int interleave, int dst_field,
                        int flags)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);
    int plane, vsub = desc->log2_chroma_h;
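    /* source line step: 1 when both fields are copied, 2 when only one field is */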
    int k = src_field == FIELD_UPPER_AND_LOWER ? 1 : 2;
    int h;

    for (plane = 0; plane < desc->nb_components; plane++) {
        int lines = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(src_h, vsub) : src_h;
        int cols  = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(    w, desc->log2_chroma_w) : w;
        int linesize = av_image_get_linesize(format, w, plane);
        uint8_t *dstp = dst[plane];
        const uint8_t *srcp = src[plane];

        if (linesize < 0)
            return;

        lines = (lines + (src_field == FIELD_UPPER)) / k;
        if (src_field == FIELD_LOWER)
            srcp += src_linesize[plane];
        if (interleave && dst_field == FIELD_LOWER)
            dstp += dst_linesize[plane];
        if (flags & TINTERLACE_FLAG_VLPF) {
            // Low-pass filtering is required when creating an interlaced destination from
            // a progressive source which contains high-frequency vertical detail.
            // Filtering will reduce interlace 'twitter' and Moire patterning.
            int srcp_linesize = src_linesize[plane] * k;
            int dstp_linesize = dst_linesize[plane] * (interleave ? 2 : 1);
            for (h = lines; h > 0; h--) {
                const uint8_t *srcp_above = srcp - src_linesize[plane];
                const uint8_t *srcp_below = srcp + src_linesize[plane];
                if (h == lines) srcp_above = srcp; // there is no line above
                if (h == 1) srcp_below = srcp;     // there is no line below

                tinterlace->lowpass_line(dstp, cols, srcp, srcp_above, srcp_below);
                dstp += dstp_linesize;
                srcp += srcp_linesize;
            }
        } else {
            av_image_copy_plane(dstp, dst_linesize[plane] * (interleave ? 2 : 1),
                                srcp, src_linesize[plane] * k, linesize, lines);
        }
    }
}
Code example #5
File: vf_fieldorder.c Project: JackDanger/libav
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext   *ctx = inlink->dst;
    FieldOrderContext *s   = ctx->priv;
    int               plane;

    /** fill an array with the number of bytes that the video
     *  data occupies per line for each plane of the input video */
    for (plane = 0; plane < 4; plane++) {
        s->line_size[plane] = av_image_get_linesize(inlink->format, inlink->w,
                                                    plane);
    }

    return 0;
}
Code example #6
static int end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ShowInfoContext *showinfo = ctx->priv;
    AVFilterBufferRef *picref = inlink->cur_buf;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    uint32_t plane_checksum[4] = {0}, checksum = 0;
    int i, plane, vsub = desc->log2_chroma_h;

    for (plane = 0; plane < 4 && picref->data[plane]; plane++) {
        int64_t linesize = av_image_get_linesize(picref->format, picref->video->w, plane);
        uint8_t *data = picref->data[plane];
        int h = plane == 1 || plane == 2 ? inlink->h >> vsub : inlink->h;

        if (linesize < 0)
            return linesize;

        for (i = 0; i < h; i++) {
            plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize);
            checksum = av_adler32_update(checksum, data, linesize);
            data += picref->linesize[plane];
        }
    }

    av_log(ctx, AV_LOG_INFO,
           "n:%d pts:%s pts_time:%s pos:%"PRId64" "
           "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
           "checksum:%08X plane_checksum:[%08X",
           showinfo->frame,
           av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base), picref->pos,
           desc->name,
           picref->video->sample_aspect_ratio.num, picref->video->sample_aspect_ratio.den,
           picref->video->w, picref->video->h,
           !picref->video->interlaced     ? 'P' :         /* Progressive  */
           picref->video->top_field_first ? 'T' : 'B',    /* Top / Bottom */
           picref->video->key_frame,
           av_get_picture_type_char(picref->video->pict_type),
           checksum, plane_checksum[0]);

    for (plane = 1; plane < 4 && picref->data[plane]; plane++)
        av_log(ctx, AV_LOG_INFO, " %08X", plane_checksum[plane]);
    av_log(ctx, AV_LOG_INFO, "]\n");

    showinfo->frame++;
    return ff_end_frame(inlink->dst->outputs[0]);
}
Code example #7
File: vf_interlace.c Project: plumbojumbo/FFmpeg
static void copy_picture_field(AVFrame *src_frame, AVFrame *dst_frame,
                               AVFilterLink *inlink, enum FieldType field_type,
                               int lowpass)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int vsub = desc->log2_chroma_h;
    int plane, i, j;

    for (plane = 0; plane < desc->nb_components; plane++) {
        int lines = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;
        int linesize = av_image_get_linesize(inlink->format, inlink->w, plane);
        uint8_t *dstp = dst_frame->data[plane];
        const uint8_t *srcp = src_frame->data[plane];

        av_assert0(linesize >= 0);

        lines = (lines + (field_type == FIELD_UPPER)) / 2;
        if (field_type == FIELD_LOWER)
            srcp += src_frame->linesize[plane];
        if (field_type == FIELD_LOWER)
            dstp += dst_frame->linesize[plane];
        if (lowpass) {
            int srcp_linesize = src_frame->linesize[plane] * 2;
            int dstp_linesize = dst_frame->linesize[plane] * 2;
            for (j = lines; j > 0; j--) {
                const uint8_t *srcp_above = srcp - src_frame->linesize[plane];
                const uint8_t *srcp_below = srcp + src_frame->linesize[plane];
                if (j == lines)
                    srcp_above = srcp; // there is no line above
                if (j == 1)
                    srcp_below = srcp; // there is no line below
                for (i = 0; i < linesize; i++) {
                    // this calculation is an integer representation of
                    // '0.5 * current + 0.25 * above + 0.25 * below'
                    // '1 +' is for rounding.
                    dstp[i] = (1 + srcp[i] + srcp[i] + srcp_above[i] + srcp_below[i]) >> 2;
                }
                dstp += dstp_linesize;
                srcp += srcp_linesize;
            }
        } else {
            av_image_copy_plane(dstp, dst_frame->linesize[plane] * 2,
                                srcp, src_frame->linesize[plane] * 2,
                                linesize, lines);
        }
    }
}
Code example #8
static void copy_picture_field(InterlaceContext *s,
                               AVFrame *src_frame, AVFrame *dst_frame,
                               AVFilterLink *inlink, enum FieldType field_type,
                               int lowpass)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int vsub = desc->log2_chroma_h;
    int plane, j;

    for (plane = 0; plane < desc->nb_components; plane++) {
        int lines = (plane == 1 || plane == 2) ? FF_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;
        ptrdiff_t linesize = av_image_get_linesize(inlink->format, inlink->w, plane);
        uint8_t *dstp = dst_frame->data[plane];
        const uint8_t *srcp = src_frame->data[plane];

        av_assert0(linesize >= 0);

        lines = (lines + (field_type == FIELD_UPPER)) / 2;
        if (field_type == FIELD_LOWER)
            srcp += src_frame->linesize[plane];
        if (field_type == FIELD_LOWER)
            dstp += dst_frame->linesize[plane];
        if (lowpass) {
            int srcp_linesize = src_frame->linesize[plane] * 2;
            int dstp_linesize = dst_frame->linesize[plane] * 2;
            for (j = lines; j > 0; j--) {
                const uint8_t *srcp_above = srcp - src_frame->linesize[plane];
                const uint8_t *srcp_below = srcp + src_frame->linesize[plane];
                if (j == lines)
                    srcp_above = srcp; // there is no line above
                if (j == 1)
                    srcp_below = srcp; // there is no line below
                s->lowpass_line(dstp, linesize, srcp, srcp_above, srcp_below);
                dstp += dstp_linesize;
                srcp += srcp_linesize;
            }
        } else {
            av_image_copy_plane(dstp, dst_frame->linesize[plane] * 2,
                                srcp, src_frame->linesize[plane] * 2,
                                linesize, lines);
        }
    }
}
Code example #9
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    uint32_t plane_checksum[4] = {0}, checksum = 0;
    int i, plane, vsub = desc->log2_chroma_h;

    for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
        int64_t linesize = av_image_get_linesize(frame->format, frame->width, plane);
        uint8_t *data = frame->data[plane];
        int h = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;

        if (linesize < 0)
            return linesize;

        for (i = 0; i < h; i++) {
            plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize);
            checksum = av_adler32_update(checksum, data, linesize);
            data += frame->linesize[plane];
        }
    }

    av_log(ctx, AV_LOG_INFO,
           "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
           "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
           "checksum:%08X plane_checksum:[%08X",
           inlink->frame_count,
           av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base), av_frame_get_pkt_pos(frame),
           desc->name,
           frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den,
           frame->width, frame->height,
           !frame->interlaced_frame ? 'P' :         /* Progressive  */
           frame->top_field_first   ? 'T' : 'B',    /* Top / Bottom */
           frame->key_frame,
           av_get_picture_type_char(frame->pict_type),
           checksum, plane_checksum[0]);

    for (plane = 1; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++)
        av_log(ctx, AV_LOG_INFO, " %08X", plane_checksum[plane]);
    av_log(ctx, AV_LOG_INFO, "]\n");

    return ff_filter_frame(inlink->dst->outputs[0], frame);
}
Code example #10
File: vf_showinfo.c Project: Arcen/libav
static void end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ShowInfoContext *showinfo = ctx->priv;
    AVFilterBufferRef *picref = inlink->cur_buf;
    uint32_t plane_checksum[4] = {0}, checksum = 0;
    int i, plane, vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;

    for (plane = 0; plane < 4 && picref->data[plane]; plane++) {
        int linesize = av_image_get_linesize(picref->format, picref->video->w, plane);
        uint8_t *data = picref->data[plane];
        int h = plane == 1 || plane == 2 ? inlink->h >> vsub : inlink->h;

        if (linesize < 0)
            continue; /* av_image_get_linesize() returns a negative AVERROR code on failure */

        for (i = 0; i < h; i++) {
            plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize);
            checksum = av_adler32_update(checksum, data, linesize);
            data += picref->linesize[plane];
        }
    }

    av_log(ctx, AV_LOG_INFO,
           "n:%d pts:%"PRId64" pts_time:%f pos:%"PRId64" "
           "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
           "checksum:%u plane_checksum:[%u %u %u %u]\n",
           showinfo->frame,
           picref->pts, picref->pts * av_q2d(inlink->time_base), picref->pos,
           av_pix_fmt_descriptors[picref->format].name,
           picref->video->pixel_aspect.num, picref->video->pixel_aspect.den,
           picref->video->w, picref->video->h,
           !picref->video->interlaced     ? 'P' :         /* Progressive  */
           picref->video->top_field_first ? 'T' : 'B',    /* Top / Bottom */
           picref->video->key_frame,
           av_get_picture_type_char(picref->video->pict_type),
           checksum, plane_checksum[0], plane_checksum[1], plane_checksum[2], plane_checksum[3]);

    showinfo->frame++;
    ff_end_frame(inlink->dst->outputs[0]);
}
Code example #11
File: crystalhd.c Project: Tjoppen/FFmpeg
static av_cold int init(AVCodecContext *avctx)
{
    CHDContext* priv;
    BC_STATUS ret;
    BC_INFO_CRYSTAL version;
    BC_INPUT_FORMAT format = {
        .FGTEnable   = FALSE,
        .Progressive = TRUE,
        .OptFlags    = 0x80000000 | vdecFrameRate59_94 | 0x40,
        .width       = avctx->width,
        .height      = avctx->height,
    };

    BC_MEDIA_SUBTYPE subtype;

    uint32_t mode = DTS_PLAYBACK_MODE |
                    DTS_LOAD_FILE_PLAY_FW |
                    DTS_SKIP_TX_CHK_CPB |
                    DTS_PLAYBACK_DROP_RPT_MODE |
                    DTS_SINGLE_THREADED_MODE |
                    DTS_DFLT_RESOLUTION(vdecRESOLUTION_1080p23_976);

    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD Init for %s\n",
           avctx->codec->name);

    avctx->pix_fmt = PIX_FMT_YUYV422;

    /* Initialize the library */
    priv               = avctx->priv_data;
    priv->avctx        = avctx;
    priv->is_nal       = avctx->extradata_size > 0 && *(avctx->extradata) == 1;
    priv->last_picture = -1;
    priv->decode_wait  = BASE_WAIT;

    subtype = id2subtype(priv, avctx->codec->id);
    switch (subtype) {
    case BC_MSUBTYPE_AVC1:
        {
            uint8_t *dummy_p;
            int dummy_int;

            priv->bsfc = av_bitstream_filter_init("h264_mp4toannexb");
            if (!priv->bsfc) {
                av_log(avctx, AV_LOG_ERROR,
                       "Cannot open the h264_mp4toannexb BSF!\n");
                return AVERROR_BSF_NOT_FOUND;
            }
            av_bitstream_filter_filter(priv->bsfc, avctx, NULL, &dummy_p,
                                       &dummy_int, NULL, 0, 0);
        }
        subtype = BC_MSUBTYPE_H264;
        // Fall-through
    case BC_MSUBTYPE_H264:
        format.startCodeSz = 4;
        // Fall-through
    case BC_MSUBTYPE_VC1:
    case BC_MSUBTYPE_WVC1:
    case BC_MSUBTYPE_WMV3:
    case BC_MSUBTYPE_WMVA:
    case BC_MSUBTYPE_MPEG2VIDEO:
    case BC_MSUBTYPE_DIVX:
    case BC_MSUBTYPE_DIVX311:
        format.pMetaData  = avctx->extradata;
        format.metaDataSz = avctx->extradata_size;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: Unknown codec name\n");
        return AVERROR(EINVAL);
    }
    format.mSubtype = subtype;

    if (priv->sWidth) {
        format.bEnableScaling = 1;
        format.ScalingParams.sWidth = priv->sWidth;
    }

    /* Get a decoder instance */
    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: starting up\n");
    // Initialize the Link and Decoder devices
    ret = DtsDeviceOpen(&priv->dev, mode);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: DtsDeviceOpen failed\n");
        goto fail;
    }

    ret = DtsCrystalHDVersion(priv->dev, &version);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_VERBOSE,
               "CrystalHD: DtsCrystalHDVersion failed\n");
        goto fail;
    }
    priv->is_70012 = version.device == 0;

    if (priv->is_70012 &&
        (subtype == BC_MSUBTYPE_DIVX || subtype == BC_MSUBTYPE_DIVX311)) {
        av_log(avctx, AV_LOG_VERBOSE,
               "CrystalHD: BCM70012 doesn't support MPEG4-ASP/DivX/Xvid\n");
        goto fail;
    }

    ret = DtsSetInputFormat(priv->dev, &format);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: SetInputFormat failed\n");
        goto fail;
    }

    ret = DtsOpenDecoder(priv->dev, BC_STREAM_TYPE_ES);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsOpenDecoder failed\n");
        goto fail;
    }

    ret = DtsSetColorSpace(priv->dev, OUTPUT_MODE422_YUY2);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsSetColorSpace failed\n");
        goto fail;
    }
    ret = DtsStartDecoder(priv->dev);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartDecoder failed\n");
        goto fail;
    }
    ret = DtsStartCapture(priv->dev);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartCapture failed\n");
        goto fail;
    }

    if (avctx->codec->id == CODEC_ID_H264) {
        priv->parser = av_parser_init(avctx->codec->id);
        if (!priv->parser)
            av_log(avctx, AV_LOG_WARNING,
                   "Cannot open the h.264 parser! Interlaced h.264 content "
                   "will not be detected reliably.\n");
        else
            priv->parser->flags = PARSER_FLAG_COMPLETE_FRAMES;
    }
    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Init complete.\n");

    return 0;

 fail:
    uninit(avctx);
    return -1;
}


static inline CopyRet copy_frame(AVCodecContext *avctx,
                                 BC_DTS_PROC_OUT *output,
                                 void *data, int *data_size)
{
    BC_STATUS ret;
    BC_DTS_STATUS decoder_status;
    uint8_t trust_interlaced;
    uint8_t interlaced;

    CHDContext *priv = avctx->priv_data;
    int64_t pkt_pts  = AV_NOPTS_VALUE;
    uint8_t pic_type = 0;

    uint8_t bottom_field = (output->PicInfo.flags & VDEC_FLAG_BOTTOMFIELD) ==
                           VDEC_FLAG_BOTTOMFIELD;
    uint8_t bottom_first = !!(output->PicInfo.flags & VDEC_FLAG_BOTTOM_FIRST);

    int width    = output->PicInfo.width;
    int height   = output->PicInfo.height;
    int bwidth;
    uint8_t *src = output->Ybuff;
    int sStride;
    uint8_t *dst;
    int dStride;

    if (output->PicInfo.timeStamp != 0) {
        OpaqueList *node = opaque_list_pop(priv, output->PicInfo.timeStamp);
        if (node) {
            pkt_pts = node->reordered_opaque;
            pic_type = node->pic_type;
            av_free(node);
        } else {
            /*
             * We will encounter a situation where a timestamp cannot be
             * popped if a second field is being returned. In this case,
             * each field has the same timestamp and the first one will
             * cause it to be popped. To keep subsequent calculations
             * simple, pic_type should be set to a FIELD value - doesn't
             * matter which, but I chose BOTTOM.
             */
            pic_type = PICT_BOTTOM_FIELD;
        }
        av_log(avctx, AV_LOG_VERBOSE, "output \"pts\": %"PRIu64"\n",
               output->PicInfo.timeStamp);
        av_log(avctx, AV_LOG_VERBOSE, "output picture type %d\n",
               pic_type);
    }

    ret = DtsGetDriverStatus(priv->dev, &decoder_status);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR,
               "CrystalHD: GetDriverStatus failed: %u\n", ret);
        return RET_ERROR;
    }

    /*
     * For most content, we can trust the interlaced flag returned
     * by the hardware, but sometimes we can't. These are the
     * conditions under which we can trust the flag:
     *
     * 1) It's not h.264 content
     * 2) The UNKNOWN_SRC flag is not set
     * 3) We know we're expecting a second field
     * 4) The hardware reports this picture and the next picture
     *    have the same picture number.
     *
     * Note that there can still be interlaced content that will
     * fail this check, if the hardware hasn't decoded the next
     * picture or if there is a corruption in the stream. (In either
     * case a 0 will be returned for the next picture number)
     */
    trust_interlaced = avctx->codec->id != CODEC_ID_H264 ||
                       !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ||
                       priv->need_second_field ||
                       (decoder_status.picNumFlags & ~0x40000000) ==
                       output->PicInfo.picture_number;

    /*
     * If we got a false negative for trust_interlaced on the first field,
     * we will realise our mistake here when we see that the picture number is that
     * of the previous picture. We cannot recover the frame and should discard the
     * second field to keep the correct number of output frames.
     */
    if (output->PicInfo.picture_number == priv->last_picture && !priv->need_second_field) {
        av_log(avctx, AV_LOG_WARNING,
               "Incorrectly guessed progressive frame. Discarding second field\n");
        /* Returning without providing a picture. */
        return RET_OK;
    }

    interlaced = (output->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC) &&
                 trust_interlaced;

    if (!trust_interlaced && (decoder_status.picNumFlags & ~0x40000000) == 0) {
        av_log(avctx, AV_LOG_VERBOSE,
               "Next picture number unknown. Assuming progressive frame.\n");
    }

    av_log(avctx, AV_LOG_VERBOSE, "Interlaced state: %d | trust_interlaced %d\n",
           interlaced, trust_interlaced);

    if (priv->pic.data[0] && !priv->need_second_field)
        avctx->release_buffer(avctx, &priv->pic);

    priv->need_second_field = interlaced && !priv->need_second_field;

    priv->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
                             FF_BUFFER_HINTS_REUSABLE;
    if (!priv->pic.data[0]) {
        if (avctx->get_buffer(avctx, &priv->pic) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return RET_ERROR;
        }
    }

    bwidth = av_image_get_linesize(avctx->pix_fmt, width, 0);
    if (priv->is_70012) {
        int pStride;

        if (width <= 720)
            pStride = 720;
        else if (width <= 1280)
            pStride = 1280;
        else
            pStride = 1920; /* assumption: pad widths above 1280 to a 1920 stride */
        sStride = av_image_get_linesize(avctx->pix_fmt, pStride, 0);
    } else {
        sStride = bwidth;
    }

    dStride = priv->pic.linesize[0];
    dst     = priv->pic.data[0];

    av_log(priv->avctx, AV_LOG_VERBOSE, "CrystalHD: Copying out frame\n");

    if (interlaced) {
        int dY = 0;
        int sY = 0;

        height /= 2;
        if (bottom_field) {
            av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: bottom field\n");
            dY = 1;
        } else {
            av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: top field\n");
            dY = 0;
        }

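        /* the destination index advances twice per copied line (in the loop
         * update and again in the body) so this field lands on every other
         * output line */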
        for (sY = 0; sY < height; dY++, sY++) {
            memcpy(&(dst[dY * dStride]), &(src[sY * sStride]), bwidth);
            dY++;
        }
    } else {
        av_image_copy_plane(dst, dStride, src, sStride, bwidth, height);
    }

    priv->pic.interlaced_frame = interlaced;
    if (interlaced)
        priv->pic.top_field_first = !bottom_first;

    priv->pic.pkt_pts = pkt_pts;

    if (!priv->need_second_field) {
        *data_size       = sizeof(AVFrame);
        *(AVFrame *)data = priv->pic;
    }

    /*
     * Two types of PAFF content have been observed. One form causes the
     * hardware to return a field pair and the other individual fields,
     * even though the input is always individual fields. We must skip
     * copying on the next decode() call to maintain pipeline length in
     * the first case.
     */
    if (!interlaced && (output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) &&
        (pic_type == PICT_TOP_FIELD || pic_type == PICT_BOTTOM_FIELD)) {
        av_log(priv->avctx, AV_LOG_VERBOSE, "Fieldpair from two packets.\n");
        return RET_SKIP_NEXT_COPY;
    }

    /*
     * Testing has shown that in all cases where we don't want to return the
     * full frame immediately, VDEC_FLAG_UNKNOWN_SRC is set.
     */
    return priv->need_second_field &&
           !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ?
           RET_COPY_NEXT_FIELD : RET_OK;
}
Code example #12
File: vf_showinfo.c Project: BEforlin/TCC_ffmpeg
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    uint32_t plane_checksum[4] = {0}, checksum = 0;
    int i, plane, vsub = desc->log2_chroma_h;

    for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
        int64_t linesize = av_image_get_linesize(frame->format, frame->width, plane);
        uint8_t *data = frame->data[plane];
        int h = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;

        if (linesize < 0)
            return linesize;

        for (i = 0; i < h; i++) {
            plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize);
            checksum = av_adler32_update(checksum, data, linesize);
            data += frame->linesize[plane];
        }
    }

    av_log(ctx, AV_LOG_INFO,
           "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
           "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
           "checksum:%08"PRIX32" plane_checksum:[%08"PRIX32,
           inlink->frame_count,
           av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base), av_frame_get_pkt_pos(frame),
           desc->name,
           frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den,
           frame->width, frame->height,
           !frame->interlaced_frame ? 'P' :         /* Progressive  */
           frame->top_field_first   ? 'T' : 'B',    /* Top / Bottom */
           frame->key_frame,
           av_get_picture_type_char(frame->pict_type),
           checksum, plane_checksum[0]);

    for (plane = 1; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++)
        av_log(ctx, AV_LOG_INFO, " %08"PRIX32, plane_checksum[plane]);
    av_log(ctx, AV_LOG_INFO, "]\n");

    for (i = 0; i < frame->nb_side_data; i++) {
        AVFrameSideData *sd = frame->side_data[i];

        av_log(ctx, AV_LOG_INFO, "  side data - ");
        switch (sd->type) {
        case AV_FRAME_DATA_PANSCAN:
            av_log(ctx, AV_LOG_INFO, "pan/scan");
            break;
        case AV_FRAME_DATA_A53_CC:
            av_log(ctx, AV_LOG_INFO, "A/53 closed captions (%d bytes)", sd->size);
            break;
        case AV_FRAME_DATA_STEREO3D:
            dump_stereo3d(ctx, sd);
            break;
        default:
            av_log(ctx, AV_LOG_WARNING, "unknown side data type %d (%d bytes)",
                   sd->type, sd->size);
            break;
        }

        av_log(ctx, AV_LOG_INFO, "\n");
    }

    return ff_filter_frame(inlink->dst->outputs[0], frame);
}
Code example #13
File: VideoFormat.cpp Project: sweatball/QtAV
 int bytesPerLine(int width, int plane) const {
     return av_image_get_linesize(pixfmt_ff, width, plane);
 }
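As the QtAV wrapper above suggests, av_image_get_linesize() returns the number of bytes one row of the requested plane occupies, or a negative AVERROR code on failure, so the result normally needs a sign check before use. Below is a minimal standalone sketch of that calling pattern; the YUV420P format and 1280-pixel width are arbitrary values chosen for illustration, not part of any example above.

#include <stdio.h>
#include <libavutil/imgutils.h>
#include <libavutil/pixdesc.h>

int main(void)
{
    enum AVPixelFormat fmt = AV_PIX_FMT_YUV420P; /* arbitrary example format */
    int width = 1280, plane;

    for (plane = 0; plane < 4; plane++) {
        /* bytes per row of this plane; 0 for planes the format does not use */
        int linesize = av_image_get_linesize(fmt, width, plane);
        if (linesize < 0) {
            fprintf(stderr, "plane %d: error %d\n", plane, linesize);
            continue;
        }
        printf("%s plane %d: %d bytes per line\n",
               av_get_pix_fmt_name(fmt), plane, linesize);
    }
    return 0;
}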
Code example #14
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    ShowInfoContext *showinfo = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    uint32_t plane_checksum[4] = {0}, checksum = 0;
    int i, plane, vsub = desc->log2_chroma_h;

    for (plane = 0; plane < 4 && frame->data[plane]; plane++) {
        int linesize = av_image_get_linesize(frame->format, frame->width, plane);
        uint8_t *data = frame->data[plane];
        int h = plane == 1 || plane == 2 ? inlink->h >> vsub : inlink->h;

        if (linesize < 0)
            return linesize;

        for (i = 0; i < h; i++) {
            plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize);
            checksum = av_adler32_update(checksum, data, linesize);
            data += frame->linesize[plane];
        }
    }

    av_log(ctx, AV_LOG_INFO,
           "n:%d pts:%"PRId64" pts_time:%f "
           "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
           "checksum:%"PRIu32" plane_checksum:[%"PRIu32" %"PRIu32" %"PRIu32" %"PRIu32"]\n",
           showinfo->frame,
           frame->pts, frame->pts * av_q2d(inlink->time_base),
           desc->name,
           frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den,
           frame->width, frame->height,
           !frame->interlaced_frame ? 'P' :         /* Progressive  */
           frame->top_field_first   ? 'T' : 'B',    /* Top / Bottom */
           frame->key_frame,
           av_get_picture_type_char(frame->pict_type),
           checksum, plane_checksum[0], plane_checksum[1], plane_checksum[2], plane_checksum[3]);

    for (i = 0; i < frame->nb_side_data; i++) {
        AVFrameSideData *sd = frame->side_data[i];

        av_log(ctx, AV_LOG_INFO, "  side data - ");
        switch (sd->type) {
        case AV_FRAME_DATA_PANSCAN:
            av_log(ctx, AV_LOG_INFO, "pan/scan");
            break;
        case AV_FRAME_DATA_A53_CC:
            av_log(ctx, AV_LOG_INFO, "A/53 closed captions (%d bytes)", sd->size);
            break;
        case AV_FRAME_DATA_STEREO3D:
            dump_stereo3d(ctx, sd);
            break;
        case AV_FRAME_DATA_DISPLAYMATRIX:
            av_log(ctx, AV_LOG_INFO, "displaymatrix: rotation of %.2f degrees",
                   av_display_rotation_get((int32_t *)sd->data));
            break;
        case AV_FRAME_DATA_AFD:
            av_log(ctx, AV_LOG_INFO, "afd: value of %"PRIu8, sd->data[0]);
            break;
        default:
            av_log(ctx, AV_LOG_WARNING, "unknown side data type %d (%d bytes)",
                   sd->type, sd->size);
            break;
        }

        av_log(ctx, AV_LOG_INFO, "\n");
    }

    showinfo->frame++;
    return ff_filter_frame(inlink->dst->outputs[0], frame);
}
Code example #15
File: dds.c Project: MAXsundai/FFmpeg
static int dds_decode(AVCodecContext *avctx, void *data,
                      int *got_frame, AVPacket *avpkt)
{
    DDSContext *ctx = avctx->priv_data;
    GetByteContext *gbc = &ctx->gbc;
    AVFrame *frame = data;
    int mipmap;
    int ret;

    ff_texturedsp_init(&ctx->texdsp);
    bytestream2_init(gbc, avpkt->data, avpkt->size);

    if (bytestream2_get_bytes_left(gbc) < 128) {
        av_log(avctx, AV_LOG_ERROR, "Frame is too small (%d).\n",
               bytestream2_get_bytes_left(gbc));
        return AVERROR_INVALIDDATA;
    }

    if (bytestream2_get_le32(gbc) != MKTAG('D', 'D', 'S', ' ') ||
            bytestream2_get_le32(gbc) != 124) { // header size
        av_log(avctx, AV_LOG_ERROR, "Invalid DDS header.\n");
        return AVERROR_INVALIDDATA;
    }

    bytestream2_skip(gbc, 4); // flags

    avctx->height = bytestream2_get_le32(gbc);
    avctx->width  = bytestream2_get_le32(gbc);
    ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid image size %dx%d.\n",
               avctx->width, avctx->height);
        return ret;
    }

    /* Since codec is based on 4x4 blocks, size is aligned to 4. */
    avctx->coded_width  = FFALIGN(avctx->width,  TEXTURE_BLOCK_W);
    avctx->coded_height = FFALIGN(avctx->height, TEXTURE_BLOCK_H);

    bytestream2_skip(gbc, 4); // pitch
    bytestream2_skip(gbc, 4); // depth
    mipmap = bytestream2_get_le32(gbc);
    if (mipmap != 0)
        av_log(avctx, AV_LOG_VERBOSE, "Found %d mipmaps (ignored).\n", mipmap);

    /* Extract pixel format information, considering additional elements
     * in reserved1 and reserved2. */
    ret = parse_pixel_format(avctx);
    if (ret < 0)
        return ret;

    ret = ff_get_buffer(avctx, frame, 0);
    if (ret < 0)
        return ret;

    if (ctx->compressed) {
        int size = (avctx->coded_height / TEXTURE_BLOCK_H) *
                   (avctx->coded_width / TEXTURE_BLOCK_W) * ctx->tex_ratio;
        ctx->slice_count = av_clip(avctx->thread_count, 1,
                                   avctx->coded_height / TEXTURE_BLOCK_H);

        if (bytestream2_get_bytes_left(gbc) < size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Compressed Buffer is too small (%d < %d).\n",
                   bytestream2_get_bytes_left(gbc), size);
            return AVERROR_INVALIDDATA;
        }

        /* Use the decompress function on the texture, one block per thread. */
        ctx->tex_data = gbc->buffer;
        avctx->execute2(avctx, decompress_texture_thread, frame, NULL, ctx->slice_count);
    } else {
        int linesize = av_image_get_linesize(avctx->pix_fmt, frame->width, 0);

        if (linesize < 0)
            return linesize;

        if (ctx->paletted) {
            int i;
            /* Use the first 1024 bytes as palette, then copy the rest. */
            bytestream2_get_buffer(gbc, frame->data[1], 256 * 4);
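            /* repack each RGBA palette entry into the native-endian
             * 0xAARRGGBB layout that PAL8 frames expect */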
            for (i = 0; i < 256; i++)
                AV_WN32(frame->data[1] + i*4,
                        (frame->data[1][2+i*4]<<0)+
                        (frame->data[1][1+i*4]<<8)+
                        (frame->data[1][0+i*4]<<16)+
                        (frame->data[1][3+i*4]<<24)
                       );

            frame->palette_has_changed = 1;
        }

        if (bytestream2_get_bytes_left(gbc) < frame->height * linesize) {
            av_log(avctx, AV_LOG_ERROR, "Buffer is too small (%d < %d).\n",
                   bytestream2_get_bytes_left(gbc), frame->height * linesize);
            return AVERROR_INVALIDDATA;
        }

        av_image_copy_plane(frame->data[0], frame->linesize[0],
                            gbc->buffer, linesize,
                            linesize, frame->height);
    }

    /* Run any post processing here if needed. */
    if (avctx->pix_fmt == AV_PIX_FMT_BGRA ||
            avctx->pix_fmt == AV_PIX_FMT_RGBA ||
            avctx->pix_fmt == AV_PIX_FMT_RGB0 ||
            avctx->pix_fmt == AV_PIX_FMT_BGR0 ||
            avctx->pix_fmt == AV_PIX_FMT_YA8)
        run_postproc(avctx, frame);

    /* Frame is ready to be output. */
    frame->pict_type = AV_PICTURE_TYPE_I;
    frame->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}
Code example #16
static av_cold int init(AVCodecContext *avctx)
{
	CHDContext* priv;
	BC_STATUS ret;
	BC_INFO_CRYSTAL version;
	BC_INPUT_FORMAT format =
	{
		.FGTEnable   = FALSE,
		.Progressive = TRUE,
		.OptFlags    = 0x80000000 | vdecFrameRate59_94 | 0x40,
		.width       = avctx->width,
		.height      = avctx->height,
	};

	BC_MEDIA_SUBTYPE subtype;

	uint32_t mode = DTS_PLAYBACK_MODE |
	                DTS_LOAD_FILE_PLAY_FW |
	                DTS_SKIP_TX_CHK_CPB |
	                DTS_PLAYBACK_DROP_RPT_MODE |
	                DTS_SINGLE_THREADED_MODE |
	                DTS_DFLT_RESOLUTION(vdecRESOLUTION_1080p23_976);

	av_log(avctx, AV_LOG_VERBOSE, "CrystalHD Init for %s\n",
	       avctx->codec->name);

	avctx->pix_fmt = PIX_FMT_YUYV422;

	/* Initialize the library */
	priv               = avctx->priv_data;
	priv->avctx        = avctx;
	priv->is_nal       = avctx->extradata_size > 0 && *(avctx->extradata) == 1;
	priv->last_picture = -1;
	priv->decode_wait  = BASE_WAIT;

	subtype = id2subtype(priv, avctx->codec->id);
	switch (subtype)
	{
	case BC_MSUBTYPE_AVC1:
	{
		uint8_t *dummy_p;
		int dummy_int;
		AVBitStreamFilterContext *bsfc;

		uint32_t orig_data_size = avctx->extradata_size;
		uint8_t *orig_data = av_malloc(orig_data_size);
		if (!orig_data)
		{
			av_log(avctx, AV_LOG_ERROR,
			       "Failed to allocate copy of extradata\n");
			return AVERROR(ENOMEM);
		}
		memcpy(orig_data, avctx->extradata, orig_data_size);


		bsfc = av_bitstream_filter_init("h264_mp4toannexb");
		if (!bsfc)
		{
			av_log(avctx, AV_LOG_ERROR,
			       "Cannot open the h264_mp4toannexb BSF!\n");
			av_free(orig_data);
			return AVERROR_BSF_NOT_FOUND;
		}
		av_bitstream_filter_filter(bsfc, avctx, NULL, &dummy_p,
		                           &dummy_int, NULL, 0, 0);
		av_bitstream_filter_close(bsfc);

		priv->sps_pps_buf     = avctx->extradata;
		priv->sps_pps_size    = avctx->extradata_size;
		avctx->extradata      = orig_data;
		avctx->extradata_size = orig_data_size;

		format.pMetaData   = priv->sps_pps_buf;
		format.metaDataSz  = priv->sps_pps_size;
		format.startCodeSz = (avctx->extradata[4] & 0x03) + 1;
	}
	break;
	case BC_MSUBTYPE_H264:
		format.startCodeSz = 4;
		// Fall-through
	case BC_MSUBTYPE_VC1:
	case BC_MSUBTYPE_WVC1:
	case BC_MSUBTYPE_WMV3:
	case BC_MSUBTYPE_WMVA:
	case BC_MSUBTYPE_MPEG2VIDEO:
	case BC_MSUBTYPE_DIVX:
	case BC_MSUBTYPE_DIVX311:
		format.pMetaData  = avctx->extradata;
		format.metaDataSz = avctx->extradata_size;
		break;
	default:
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: Unknown codec name\n");
		return AVERROR(EINVAL);
	}
	format.mSubtype = subtype;

	/* Get a decoder instance */
	av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: starting up\n");
	// Initialize the Link and Decoder devices
	ret = DtsDeviceOpen(&priv->dev, mode);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: DtsDeviceOpen failed\n");
		goto fail;
	}

	ret = DtsCrystalHDVersion(priv->dev, &version);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_VERBOSE,
		       "CrystalHD: DtsCrystalHDVersion failed\n");
		goto fail;
	}
	priv->is_70012 = version.device == 0;

	if (priv->is_70012 &&
	        (subtype == BC_MSUBTYPE_DIVX || subtype == BC_MSUBTYPE_DIVX311))
	{
		av_log(avctx, AV_LOG_VERBOSE,
		       "CrystalHD: BCM70012 doesn't support MPEG4-ASP/DivX/Xvid\n");
		goto fail;
	}

	ret = DtsSetInputFormat(priv->dev, &format);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: SetInputFormat failed\n");
		goto fail;
	}

	ret = DtsOpenDecoder(priv->dev, BC_STREAM_TYPE_ES);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsOpenDecoder failed\n");
		goto fail;
	}

	ret = DtsSetColorSpace(priv->dev, OUTPUT_MODE422_YUY2);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsSetColorSpace failed\n");
		goto fail;
	}
	ret = DtsStartDecoder(priv->dev);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartDecoder failed\n");
		goto fail;
	}
	ret = DtsStartCapture(priv->dev);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartCapture failed\n");
		goto fail;
	}

	av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Init complete.\n");

	return 0;

fail:
	uninit(avctx);
	return -1;
}


/*
 * The CrystalHD doesn't report interlaced H.264 content in a way that allows
 * us to distinguish between specific cases that require different handling.
 * So, for now, we have to hard-code the behaviour we want.
 *
 * The default behaviour is to assume MBAFF with input and output fieldpairs.
 *
 * Define ASSUME_PAFF_OVER_MBAFF to treat input as PAFF with separate input
 * and output fields.
 *
 * Define ASSUME_TWO_INPUTS_ONE_OUTPUT to treat input as separate fields but
 * output as a single fieldpair.
 *
 * Define both to mess up your playback.
 */
#define ASSUME_PAFF_OVER_MBAFF 0
#define ASSUME_TWO_INPUTS_ONE_OUTPUT 0
static inline CopyRet copy_frame(AVCodecContext *avctx,
                                 BC_DTS_PROC_OUT *output,
                                 void *data, int *data_size,
                                 uint8_t second_field)
{
	BC_STATUS ret;
	BC_DTS_STATUS decoder_status;
	uint8_t is_paff;
	uint8_t next_frame_same;
	uint8_t interlaced;

	CHDContext *priv = avctx->priv_data;

	uint8_t bottom_field = (output->PicInfo.flags & VDEC_FLAG_BOTTOMFIELD) ==
	                       VDEC_FLAG_BOTTOMFIELD;
	uint8_t bottom_first = !!(output->PicInfo.flags & VDEC_FLAG_BOTTOM_FIRST);

	int width    = output->PicInfo.width;
	int height   = output->PicInfo.height;
	int bwidth;
	uint8_t *src = output->Ybuff;
	int sStride;
	uint8_t *dst;
	int dStride;

	ret = DtsGetDriverStatus(priv->dev, &decoder_status);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_ERROR,
		       "CrystalHD: GetDriverStatus failed: %u\n", ret);
		return RET_ERROR;
	}

	is_paff           = ASSUME_PAFF_OVER_MBAFF ||
	                    !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC);
	next_frame_same   = output->PicInfo.picture_number ==
	                    (decoder_status.picNumFlags & ~0x40000000);
	interlaced        = ((output->PicInfo.flags &
	                      VDEC_FLAG_INTERLACED_SRC) && is_paff) ||
	                    next_frame_same || bottom_field || second_field;

	av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: next_frame_same: %u | %u | %u\n",
	       next_frame_same, output->PicInfo.picture_number,
	       decoder_status.picNumFlags & ~0x40000000);

	if (priv->pic.data[0] && !priv->need_second_field)
		avctx->release_buffer(avctx, &priv->pic);

	priv->need_second_field = interlaced && !priv->need_second_field;

	priv->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
	                         FF_BUFFER_HINTS_REUSABLE;
	if (!priv->pic.data[0])
	{
		if (avctx->get_buffer(avctx, &priv->pic) < 0)
		{
			av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
			return RET_ERROR;
		}
	}

	bwidth = av_image_get_linesize(avctx->pix_fmt, width, 0);
	if (priv->is_70012)
	{
		int pStride;

		if (width <= 720)
			pStride = 720;
		else if (width <= 1280)
			pStride = 1280;
		else
			pStride = 1920; /* assumption: pad widths above 1280 to a 1920 stride */
		sStride = av_image_get_linesize(avctx->pix_fmt, pStride, 0);
	}
	else
	{
		sStride = bwidth;
	}

	dStride = priv->pic.linesize[0];
	dst     = priv->pic.data[0];

	av_log(priv->avctx, AV_LOG_VERBOSE, "CrystalHD: Copying out frame\n");

	if (interlaced)
	{
		int dY = 0;
		int sY = 0;

		height /= 2;
		if (bottom_field)
		{
			av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: bottom field\n");
			dY = 1;
		}
		else
		{
			av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: top field\n");
			dY = 0;
		}

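		/* the destination index advances twice per copied line (in the loop
		 * update and again in the body) so this field lands on every other
		 * output line */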
		for (sY = 0; sY < height; dY++, sY++)
		{
			memcpy(&(dst[dY * dStride]), &(src[sY * sStride]), bwidth);
			dY++;
		}
	}
	else
	{
		av_image_copy_plane(dst, dStride, src, sStride, bwidth, height);
	}

	priv->pic.interlaced_frame = interlaced;
	if (interlaced)
		priv->pic.top_field_first = !bottom_first;

	if (output->PicInfo.timeStamp != 0)
	{
		priv->pic.pkt_pts = opaque_list_pop(priv, output->PicInfo.timeStamp);
		av_log(avctx, AV_LOG_VERBOSE, "output \"pts\": %"PRIu64"\n",
		       priv->pic.pkt_pts);
	}

	if (!priv->need_second_field)
	{
		*data_size       = sizeof(AVFrame);
		*(AVFrame *)data = priv->pic;
	}

	if (ASSUME_TWO_INPUTS_ONE_OUTPUT &&
	        output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC)
	{
		av_log(priv->avctx, AV_LOG_VERBOSE, "Fieldpair from two packets.\n");
		return RET_SKIP_NEXT_COPY;
	}

	return RET_OK;
}
Code example #17
File: vf_showinfo.c Project: twinaphex/vice-libretro
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    uint32_t plane_checksum[4] = {0}, checksum = 0;
    int64_t sum[4] = {0}, sum2[4] = {0};
    int32_t pixelcount[4] = {0};
    int i, plane, vsub = desc->log2_chroma_h;
#ifdef IDE_COMPILE
	char tmp1[32] = {0};
	char tmp2[32] = {0};
#endif

    for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
        int64_t linesize = av_image_get_linesize(frame->format, frame->width, plane);
        uint8_t *data = frame->data[plane];
        int h = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;

        if (linesize < 0)
            return linesize;

        for (i = 0; i < h; i++) {
            plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize);
            checksum = av_adler32_update(checksum, data, linesize);

            update_sample_stats(data, linesize, sum+plane, sum2+plane);
            pixelcount[plane] += linesize;
            data += frame->linesize[plane];
        }
    }

#ifdef IDE_COMPILE
	av_log(ctx, AV_LOG_INFO,
           "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
           "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
           "checksum:%08"PRIX32" plane_checksum:[%08"PRIX32,
           inlink->frame_count,
           av_ts_make_string(tmp1, frame->pts), av_ts_make_time_string(tmp2, frame->pts, &inlink->time_base), av_frame_get_pkt_pos(frame),
           desc->name,
           frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den,
           frame->width, frame->height,
           !frame->interlaced_frame ? 'P' :         /* Progressive  */
           frame->top_field_first   ? 'T' : 'B',    /* Top / Bottom */
           frame->key_frame,
           av_get_picture_type_char(frame->pict_type),
           checksum, plane_checksum[0]);
#else
	av_log(ctx, AV_LOG_INFO,
           "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
           "fmt:%s sar:%d/%d s:%dx%d i:%c iskey:%d type:%c "
           "checksum:%08"PRIX32" plane_checksum:[%08"PRIX32,
           inlink->frame_count,
           av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base), av_frame_get_pkt_pos(frame),
           desc->name,
           frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den,
           frame->width, frame->height,
           !frame->interlaced_frame ? 'P' :         /* Progressive  */
           frame->top_field_first   ? 'T' : 'B',    /* Top / Bottom */
           frame->key_frame,
           av_get_picture_type_char(frame->pict_type),
           checksum, plane_checksum[0]);
#endif

    for (plane = 1; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++)
        av_log(ctx, AV_LOG_INFO, " %08"PRIX32, plane_checksum[plane]);
    av_log(ctx, AV_LOG_INFO, "] mean:[");
    for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++)
        av_log(ctx, AV_LOG_INFO, "%"PRId64" ", (sum[plane] + pixelcount[plane]/2) / pixelcount[plane]);
    av_log(ctx, AV_LOG_INFO, "\b] stdev:[");
    for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++)
        av_log(ctx, AV_LOG_INFO, "%3.1f ",
               sqrt((sum2[plane] - sum[plane]*(double)sum[plane]/pixelcount[plane])/pixelcount[plane]));
    av_log(ctx, AV_LOG_INFO, "\b]\n");

    for (i = 0; i < frame->nb_side_data; i++) {
        AVFrameSideData *sd = frame->side_data[i];

        av_log(ctx, AV_LOG_INFO, "  side data - ");
        switch (sd->type) {
        case AV_FRAME_DATA_PANSCAN:
            av_log(ctx, AV_LOG_INFO, "pan/scan");
            break;
        case AV_FRAME_DATA_A53_CC:
            av_log(ctx, AV_LOG_INFO, "A/53 closed captions (%d bytes)", sd->size);
            break;
        case AV_FRAME_DATA_STEREO3D:
            dump_stereo3d(ctx, sd);
            break;
        case AV_FRAME_DATA_DISPLAYMATRIX:
            av_log(ctx, AV_LOG_INFO, "displaymatrix: rotation of %.2f degrees",
                   av_display_rotation_get((int32_t *)sd->data));
            break;
        case AV_FRAME_DATA_AFD:
            av_log(ctx, AV_LOG_INFO, "afd: value of %"PRIu8, sd->data[0]);
            break;
        default:
            av_log(ctx, AV_LOG_WARNING, "unknown side data type %d (%d bytes)",
                   sd->type, sd->size);
            break;
        }

        av_log(ctx, AV_LOG_INFO, "\n");
    }

    return ff_filter_frame(inlink->dst->outputs[0], frame);
}